From 72cc76e9ea25d8e1acc782436e31a5bba849f097 Mon Sep 17 00:00:00 2001
From: Bruce Ashfield <bruce.ashfield@windriver.com>
Date: Wed, 11 Apr 2012 15:56:13 -0400
Subject: [PATCH] BFS: 3.4 compile fixes (temp)
Signed-off-by: Bruce Ashfield <bruce.ashfield@windriver.com>
---
kernel/sched/bfs.c | 28 +++++++++++++++++++++++++---
1 files changed, 25 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/bfs.c b/kernel/sched/bfs.c
index 09cd792..4f246e1 100644
--- a/kernel/sched/bfs.c
+++ b/kernel/sched/bfs.c
@@ -75,6 +75,7 @@
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
+#include <asm/switch_to.h>
#include "cpupri.h"
#include "../workqueue_sched.h"
@@ -315,7 +316,28 @@ struct rq {
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
static DEFINE_MUTEX(sched_hotcpu_mutex);
+
+/**
+ * schedule_preempt_disabled - called with preemption disabled
+ *
+ * Returns with preemption disabled. Note: preempt_count must be 1
+ */
+void __sched schedule_preempt_disabled(void)
+{
+ sched_preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+}
+
+
#ifdef CONFIG_SMP
+
+bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+ return 1;
+}
+
+
/*
* sched_domains_mutex serialises calls to init_sched_domains,
* detach_destroy_domains and partition_sched_domains.
@@ -1394,7 +1416,7 @@ can_preempt(struct task_struct *p, int prio, u64 deadline)
*/
static inline bool online_cpus(struct task_struct *p)
{
- return (likely(cpus_intersects(cpu_online_map, p->cpus_allowed)));
+ return (likely(cpumask_intersects(cpu_online_mask, &p->cpus_allowed)));
}
#else /* CONFIG_HOTPLUG_CPU */
/* All available CPUs are always online without hotplug. */
@@ -1442,7 +1464,7 @@ static void try_preempt(struct task_struct *p, struct rq *this_rq)
return;
if (likely(online_cpus(p)))
- cpus_and(tmp, cpu_online_map, p->cpus_allowed);
+ cpumask_and(&tmp, cpu_online_mask, &p->cpus_allowed);
else
return;
@@ -3463,7 +3485,7 @@ EXPORT_SYMBOL(__wake_up);
/*
* Same as __wake_up but called with the spinlock in wait_queue_head_t held.
*/
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
{
__wake_up_common(q, mode, 1, 0, NULL);
}
--
1.7.5.4