Diffstat (limited to 'features/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch')
 -rw-r--r--  features/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch  | 165 ++++++++++++++++++++
 1 file changed, 165 insertions(+), 0 deletions(-)
diff --git a/features/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/features/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
new file mode 100644
index 00000000..fb15eefc
--- /dev/null
+++ b/features/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -0,0 +1,165 @@
+From e49cfce7661aafc70145d7647cace3b08a5cff26 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 29 Aug 2013 18:21:04 +0200
+Subject: [PATCH 128/191] ptrace: fix ptrace vs tasklist_lock race
+
+As explained by Alexander Fyodorov <halcy@yandex.ru>:
+
+|read_lock(&tasklist_lock) in ptrace_stop() is converted to mutex on RT kernel,
+|and it can remove __TASK_TRACED from task->state (by moving it to
+|task->saved_state). If parent does wait() on child followed by a sys_ptrace
+|call, the following race can happen:
+|
+|- child sets __TASK_TRACED in ptrace_stop()
+|- parent does wait() which eventually calls wait_task_stopped() and returns
+| child's pid
+|- child blocks on read_lock(&tasklist_lock) in ptrace_stop() and moves
+| __TASK_TRACED flag to saved_state
+|- parent calls sys_ptrace, which calls ptrace_check_attach() and wait_task_inactive()
+
+The patch is based on his initial patch where an additional check is
+added in case __TASK_TRACED has been moved to ->saved_state. The
+pi_lock is taken so the caller cannot be interrupted between looking
+at ->state and ->saved_state.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/sched.h | 49 +++++++++++++++++++++++++++++++++++++++----
+ kernel/ptrace.c | 9 +++++++-
+ kernel/sched/core.c | 17 +++++++++++++--
+ 3 files changed, 68 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 1d85cfa28fe6..dab770a030bd 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -112,12 +112,8 @@ struct task_group;
+ __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
+ TASK_PARKED)
+
+-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
+-
+ #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+
+-#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+-
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+
+ /*
+@@ -1884,6 +1880,51 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+ }
+
++static inline bool __task_is_stopped_or_traced(struct task_struct *task)
++{
++ if (task->state & (__TASK_STOPPED | __TASK_TRACED))
++ return true;
++#ifdef CONFIG_PREEMPT_RT
++ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
++ return true;
++#endif
++ return false;
++}
++
++static inline bool task_is_stopped_or_traced(struct task_struct *task)
++{
++ bool traced_stopped;
++
++#ifdef CONFIG_PREEMPT_RT
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&task->pi_lock, flags);
++ traced_stopped = __task_is_stopped_or_traced(task);
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++#else
++ traced_stopped = __task_is_stopped_or_traced(task);
++#endif
++ return traced_stopped;
++}
++
++static inline bool task_is_traced(struct task_struct *task)
++{
++ bool traced = false;
++
++ if (task->state & __TASK_TRACED)
++ return true;
++#ifdef CONFIG_PREEMPT_RT
++ /* in case the task is sleeping on tasklist_lock */
++ raw_spin_lock_irq(&task->pi_lock);
++ if (task->state & __TASK_TRACED)
++ traced = true;
++ else if (task->saved_state & __TASK_TRACED)
++ traced = true;
++ raw_spin_unlock_irq(&task->pi_lock);
++#endif
++ return traced;
++}
++
+ /*
+ * cond_resched() and cond_resched_lock(): latency reduction via
+ * explicit rescheduling in places that are safe. The return
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 821cf1723814..3bb0ec669997 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -180,7 +180,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
+
+ spin_lock_irq(&task->sighand->siglock);
+ if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+- task->state = __TASK_TRACED;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&task->pi_lock, flags);
++ if (task->state & __TASK_TRACED)
++ task->state = __TASK_TRACED;
++ else
++ task->saved_state = __TASK_TRACED;
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ ret = true;
+ }
+ spin_unlock_irq(&task->sighand->siglock);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index c05455d4f034..5dc7a8edf093 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2596,6 +2596,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
+
++static bool check_task_state(struct task_struct *p, long match_state)
++{
++ bool match = false;
++
++ raw_spin_lock_irq(&p->pi_lock);
++ if (p->state == match_state || p->saved_state == match_state)
++ match = true;
++ raw_spin_unlock_irq(&p->pi_lock);
++
++ return match;
++}
++
+ /*
+ * wait_task_inactive - wait for a thread to unschedule.
+ *
+@@ -2640,7 +2652,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+ * is actually now running somewhere else!
+ */
+ while (task_running(rq, p)) {
+- if (match_state && unlikely(p->state != match_state))
++ if (match_state && !check_task_state(p, match_state))
+ return 0;
+ cpu_relax();
+ }
+@@ -2655,7 +2667,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+ running = task_running(rq, p);
+ queued = task_on_rq_queued(p);
+ ncsw = 0;
+- if (!match_state || p->state == match_state)
++ if (!match_state || p->state == match_state ||
++ p->saved_state == match_state)
+ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
+ task_rq_unlock(rq, p, &rf);
+
+--
+2.19.1
+
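
The following is a minimal, hypothetical userspace sketch (not part of the patch) that exercises the tracer-side sequence described in the commit message: the parent returns from wait() while the child may still be blocked on tasklist_lock inside ptrace_stop(), and then immediately issues a ptrace request that lands in ptrace_check_attach() / wait_task_inactive(). The program and its use of PTRACE_GETREGS (x86/x86-64 assumed) are illustrative only.

/*
 * Illustrative tracer/tracee pair for the race described above.
 * Hypothetical example; assumes x86/x86-64 for PTRACE_GETREGS.
 */
#include <stdio.h>
#include <unistd.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/user.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		/* Child: ask to be traced, then stop itself. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	/* Parent: wait() reports the stopped child... */
	waitpid(pid, NULL, 0);

	/*
	 * ...and the very next ptrace request can race with the child
	 * still sleeping on tasklist_lock in ptrace_stop() on RT, where
	 * __TASK_TRACED may temporarily live in ->saved_state.
	 */
	struct user_regs_struct regs;
	if (ptrace(PTRACE_GETREGS, pid, NULL, &regs) == -1)
		perror("ptrace(PTRACE_GETREGS)");

	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	return 0;
}

On !PREEMPT_RT kernels this sequence is harmless because __TASK_TRACED never leaves ->state; the patch makes the RT case behave the same by also consulting ->saved_state under pi_lock.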