path: root/features/rt/sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
Diffstat (limited to 'features/rt/sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch')
-rw-r--r--  features/rt/sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch  115
1 file changed, 115 insertions, 0 deletions
diff --git a/features/rt/sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch b/features/rt/sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
new file mode 100644
index 00000000..8de24806
--- /dev/null
+++ b/features/rt/sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
@@ -0,0 +1,115 @@
+From 34b399662682e7d121a12d7f11c06f809c60d085 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 25 Jun 2011 09:21:04 +0200
+Subject: [PATCH 074/191] sched: Add saved_state for tasks blocked on sleeping
+ locks
+
+Spinlocks are state preserving in !RT. RT changes the state when a
+task gets blocked on a lock. So we need to remember the state before
+the lock contention. If a regular wakeup (not an RTmutex-related
+wakeup) happens, the saved_state is updated to running. When the lock
+sleep is done, the saved state is restored.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sched.h | 3 +++
+ kernel/sched/core.c | 34 ++++++++++++++++++++++++++++++++--
+ kernel/sched/sched.h | 1 +
+ 3 files changed, 36 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index c8998312d7bc..3f05b8c29132 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -655,6 +655,8 @@ struct task_struct {
+ #endif
+ /* -1 unrunnable, 0 runnable, >0 stopped: */
+ volatile long state;
++ /* saved state for "spinlock sleepers" */
++ volatile long saved_state;
+
+ /*
+ * This begins the randomizable portion of task_struct. Only
+@@ -1780,6 +1782,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr);
+
+ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+ extern int wake_up_process(struct task_struct *tsk);
++extern int wake_up_lock_sleeper(struct task_struct *tsk);
+ extern void wake_up_new_task(struct task_struct *tsk);
+
+ #ifdef CONFIG_SMP
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 98191218d891..4efe6fd72719 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3314,7 +3314,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ int cpu, success = 0;
+
+ preempt_disable();
+- if (p == current) {
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && p == current) {
+ /*
+ * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
+ * == smp_processor_id()'. Together this means we can special
+@@ -3344,8 +3344,26 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ */
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+ smp_mb__after_spinlock();
+- if (!(p->state & state))
++ if (!(p->state & state)) {
++ /*
++ * The task might be running due to a spinlock sleeper
++ * wakeup. Check the saved state and set it to running
++ * if the wakeup condition is true.
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER)) {
++ if (p->saved_state & state) {
++ p->saved_state = TASK_RUNNING;
++ success = 1;
++ }
++ }
+ goto unlock;
++ }
++ /*
++ * If this is a regular wakeup, then we can unconditionally
++ * clear the saved state of a "lock sleeper".
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER))
++ p->saved_state = TASK_RUNNING;
+
+ trace_sched_waking(p);
+
+@@ -3534,6 +3552,18 @@ int wake_up_process(struct task_struct *p)
+ }
+ EXPORT_SYMBOL(wake_up_process);
+
++/**
++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
++ * @p: The process to be woken up.
++ *
++ * Same as wake_up_process() above, but passes WF_LOCK_SLEEPER in wake_flags
++ * to indicate the nature of the wakeup.
++ */
++int wake_up_lock_sleeper(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER);
++}
++
+ int wake_up_state(struct task_struct *p, unsigned int state)
+ {
+ return try_to_wake_up(p, state, 0);
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 10a1522b1e30..b65a4e244a77 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1751,6 +1751,7 @@ static inline int task_on_rq_migrating(struct task_struct *p)
+ #define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */
+ #define WF_MIGRATED 0x20 /* Internal use, task got migrated */
+ #define WF_ON_CPU 0x40 /* Wakee is on_cpu */
++#define WF_LOCK_SLEEPER 0x80 /* Wakeup spinlock "sleeper" */
+
+ #ifdef CONFIG_SMP
+ static_assert(WF_EXEC == SD_BALANCE_EXEC);
+--
+2.19.1
+
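
For readers following the control flow, the sketch below is a stand-alone userspace toy model of the mechanism the changelog describes: a task's state is overwritten while it blocks on an rtmutex-backed spinlock, so the previous value is parked in saved_state; a regular wakeup that arrives during the contention window is recorded in saved_state, while a WF_LOCK_SLEEPER wakeup only ends the lock sleep itself. Everything here (toy_task, toy_try_to_wake_up(), the constants) is made up for illustration under those assumptions; it is not the kernel code from the patch, only a compact restatement of its branching.

/*
 * Stand-alone, single-threaded toy model of the saved_state logic added by
 * this patch.  All names (toy_task, toy_try_to_wake_up, ...) are hypothetical;
 * this is not the kernel implementation.
 *
 * Build: cc -Wall -o saved_state_demo saved_state_demo.c
 */
#include <stdio.h>

#define TASK_RUNNING		0x0
#define TASK_INTERRUPTIBLE	0x1
#define TASK_UNINTERRUPTIBLE	0x2

#define WF_LOCK_SLEEPER		0x80	/* wakeup targets the lock sleep only */

struct toy_task {
	long state;		/* what the scheduler looks at */
	long saved_state;	/* state parked while blocked on a sleeping lock */
};

/* Mirrors the branching the patch adds to try_to_wake_up(). */
static int toy_try_to_wake_up(struct toy_task *p, long state, int wake_flags)
{
	int success = 0;

	if (!(p->state & state)) {
		/*
		 * The task is not in the state the waker is looking for,
		 * e.g. because it sits in TASK_UNINTERRUPTIBLE on a sleeping
		 * spinlock.  A regular wakeup must not be lost, so it is
		 * recorded in saved_state instead.
		 */
		if (!(wake_flags & WF_LOCK_SLEEPER) && (p->saved_state & state)) {
			p->saved_state = TASK_RUNNING;
			success = 1;
		}
		return success;
	}

	/* A regular wakeup also unconditionally clears any parked state. */
	if (!(wake_flags & WF_LOCK_SLEEPER))
		p->saved_state = TASK_RUNNING;

	p->state = TASK_RUNNING;
	return 1;
}

int main(void)
{
	struct toy_task t = {
		.state = TASK_INTERRUPTIBLE,	/* task was sleeping interruptibly */
		.saved_state = TASK_RUNNING,
	};

	/* Task blocks on a contended "sleeping spinlock": park the state. */
	t.saved_state = t.state;
	t.state = TASK_UNINTERRUPTIBLE;

	/* A regular wakeup arrives while the task waits for the lock. */
	toy_try_to_wake_up(&t, TASK_INTERRUPTIBLE, 0);
	printf("after regular wakeup: state=%ld saved_state=%ld\n",
	       t.state, t.saved_state);	/* saved_state is now RUNNING */

	/* The lock owner releases the lock and wakes the lock sleeper. */
	toy_try_to_wake_up(&t, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER);
	printf("after lock wakeup:    state=%ld saved_state=%ld\n",
	       t.state, t.saved_state);

	/* Back in the lock slowpath: restore whatever was parked. */
	t.state = t.saved_state;
	t.saved_state = TASK_RUNNING;
	printf("after restore:        state=%ld (RUNNING, wakeup not lost)\n",
	       t.state);

	return 0;
}

In the RT series the save and restore steps happen in the rtmutex-based spinlock slowpath (added by other patches in this queue) under pi_lock; the model above only reproduces the wakeup-side decisions that this particular patch introduces.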