Diffstat (limited to 'features/rt/tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch')
-rw-r--r--  features/rt/tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch  89
1 file changed, 89 insertions(+), 0 deletions(-)
diff --git a/features/rt/tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch b/features/rt/tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch
new file mode 100644
index 00000000..edca165f
--- /dev/null
+++ b/features/rt/tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch
@@ -0,0 +1,89 @@
+From 9c90471b76134571fa2dfddb7423d48c521146ab Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 9 Mar 2021 09:42:08 +0100
+Subject: [PATCH 045/191] tasklets: Replace spin wait in tasklet_unlock_wait()
+
+tasklet_unlock_wait() spin waits for TASKLET_STATE_RUN to be cleared. This
+is wasting CPU cycles in a tight loop which is especially painful in a
+guest when the CPU running the tasklet is scheduled out.
+
+tasklet_unlock_wait() is invoked from tasklet_kill() which is used in
+teardown paths and not performance critical at all. Replace the spin wait
+with wait_var_event().
+
+There are no users of tasklet_unlock_wait() which are invoked from atomic
+contexts. The usage in tasklet_disable() has been replaced temporarily with
+the spin waiting variant until the atomic users are fixed up and will be
+converted to the sleep wait variant later.
+
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 13 ++-----------
+ kernel/softirq.c | 18 ++++++++++++++++++
+ 2 files changed, 20 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index f0b82429950c..ee3ce4f852b7 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -660,17 +660,8 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
+ return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+
+-static inline void tasklet_unlock(struct tasklet_struct *t)
+-{
+- smp_mb__before_atomic();
+- clear_bit(TASKLET_STATE_RUN, &(t)->state);
+-}
+-
+-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+-{
+- while (test_bit(TASKLET_STATE_RUN, &t->state))
+- cpu_relax();
+-}
++void tasklet_unlock(struct tasklet_struct *t);
++void tasklet_unlock_wait(struct tasklet_struct *t);
+
+ /*
+ * Do not use in new code. Waiting for tasklets from atomic contexts is
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 9908ec4a9bfe..7cd63df59e1c 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -25,6 +25,7 @@
+ #include <linux/smpboot.h>
+ #include <linux/tick.h>
+ #include <linux/irq.h>
++#include <linux/wait_bit.h>
+
+ #include <asm/softirq_stack.h>
+
+@@ -621,6 +622,23 @@ void tasklet_kill(struct tasklet_struct *t)
+ }
+ EXPORT_SYMBOL(tasklet_kill);
+
++#ifdef CONFIG_SMP
++void tasklet_unlock(struct tasklet_struct *t)
++{
++ smp_mb__before_atomic();
++ clear_bit(TASKLET_STATE_RUN, &t->state);
++ smp_mb__after_atomic();
++ wake_up_var(&t->state);
++}
++EXPORT_SYMBOL_GPL(tasklet_unlock);
++
++void tasklet_unlock_wait(struct tasklet_struct *t)
++{
++ wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
++}
++EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
++#endif
++
+ void __init softirq_init(void)
+ {
+ int cpu;
+--
+2.19.1
+
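
For reference, the wake/wait pairing that the patch introduces can be read in isolation. The sketch below is illustrative only and is not part of the patch: demo_state, DEMO_BUSY, demo_wait() and demo_release() are hypothetical stand-ins for the tasklet state word, TASKLET_STATE_RUN, tasklet_unlock_wait() and tasklet_unlock(); wait_var_event() and wake_up_var() are the real primitives from <linux/wait_bit.h>.

    /*
     * Minimal sketch of the wait_var_event()/wake_up_var() pattern,
     * assuming kernel (sleepable) context. Names prefixed demo_ are
     * hypothetical; only the wait/wake primitives are the real API.
     */
    #include <linux/bitops.h>
    #include <linux/wait_bit.h>

    #define DEMO_BUSY	0	/* plays the role of TASKLET_STATE_RUN */

    static unsigned long demo_state;

    /* Waiter side, mirroring tasklet_unlock_wait(): sleep until the
     * bit is observed clear instead of spinning on it. */
    static void demo_wait(void)
    {
    	wait_var_event(&demo_state, !test_bit(DEMO_BUSY, &demo_state));
    }

    /* Clearer side, mirroring tasklet_unlock(): clear the bit, then
     * wake anyone sleeping on the state word. */
    static void demo_release(void)
    {
    	smp_mb__before_atomic();
    	clear_bit(DEMO_BUSY, &demo_state);
    	smp_mb__after_atomic();
    	wake_up_var(&demo_state);
    }

The smp_mb__after_atomic() before wake_up_var() is the subtle part: the wake path checks for waiters internally, so the barrier ensures the bit clear is visible before that check, and a task that tested the bit just before sleeping cannot miss the wakeup.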