 features/rt/tasklets-Provide-tasklet_disable_in_atomic.patch | 67 ++++++++++
 1 file changed, 67 insertions(+)
diff --git a/features/rt/tasklets-Provide-tasklet_disable_in_atomic.patch b/features/rt/tasklets-Provide-tasklet_disable_in_atomic.patch
new file mode 100644
index 00000000..89d0730f
--- /dev/null
+++ b/features/rt/tasklets-Provide-tasklet_disable_in_atomic.patch
@@ -0,0 +1,67 @@
+From 21b5c4734bb2b5f6e37dad86b2857ac1b2ae1a16 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:06 +0100
+Subject: [PATCH 043/191] tasklets: Provide tasklet_disable_in_atomic()
+
+Replacing the spin wait loops in tasklet_unlock_wait() with
+wait_var_event() is not possible because a handful of tasklet_disable()
+invocations happen in atomic context. All other invocations are in
+teardown paths which can sleep.
+
+Provide tasklet_disable_in_atomic() and tasklet_unlock_spin_wait() to
+convert the few atomic use cases over, which allows tasklet_disable()
+and tasklet_unlock_wait() to be changed in a later step.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index b49ac2639b3e..9d9475f7b89f 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -671,10 +671,21 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+ while (test_bit(TASKLET_STATE_RUN, &t->state))
+ cpu_relax();
+ }
++
++/*
++ * Do not use in new code. Waiting for tasklets from atomic contexts is
++ * error prone and should be avoided.
++ */
++static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
++{
++ while (test_bit(TASKLET_STATE_RUN, &t->state))
++ cpu_relax();
++}
+ #else
+ static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
+ static inline void tasklet_unlock(struct tasklet_struct *t) { }
+ static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
++static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
+ #endif
+
+ extern void __tasklet_schedule(struct tasklet_struct *t);
+@@ -699,6 +710,17 @@ static inline void tasklet_disable_nosync(struct tasklet_struct *t)
+ smp_mb__after_atomic();
+ }
+
++/*
++ * Do not use in new code. Disabling tasklets from atomic contexts is
++ * error prone and should be avoided.
++ */
++static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
++{
++ tasklet_disable_nosync(t);
++ tasklet_unlock_spin_wait(t);
++ smp_mb();
++}
++
+ static inline void tasklet_disable(struct tasklet_struct *t)
+ {
+ tasklet_disable_nosync(t);
+--
+2.19.1
+
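
Not part of the patch above: a minimal sketch of how a driver would use the
new helper, to illustrate the split the commit message describes. All of the
mydev_* names are hypothetical; only tasklet_disable_in_atomic(),
tasklet_disable(), tasklet_enable() and tasklet_kill() come from
<linux/interrupt.h>.

/* Hypothetical driver sketch -- illustrates intended usage, not real code. */
#include <linux/interrupt.h>

struct mydev {
	struct tasklet_struct rx_tasklet;
};

/*
 * Hard interrupt context: sleeping is forbidden, so the spin-waiting
 * variant added by this patch is the only safe way to wait for a
 * running tasklet here.
 */
static irqreturn_t mydev_error_irq(int irq, void *data)
{
	struct mydev *dev = data;

	tasklet_disable_in_atomic(&dev->rx_tasklet);
	/* ... reset hardware with the tasklet guaranteed not to run ... */
	tasklet_enable(&dev->rx_tasklet);
	return IRQ_HANDLED;
}

/*
 * Teardown path: may sleep, so plain tasklet_disable() stays correct and
 * can become sleepable once tasklet_unlock_wait() is converted to
 * wait_var_event() in the later step the commit message mentions.
 */
static void mydev_remove(struct mydev *dev)
{
	tasklet_disable(&dev->rx_tasklet);
	tasklet_kill(&dev->rx_tasklet);
}

Design note: tasklet_disable_in_atomic() is tasklet_disable_nosync() plus the
spin wait and a full smp_mb(), which mirrors what tasklet_disable() does at
this point in the series (tasklet_unlock_wait() is still a spin wait here).
Splitting the API first lets the common sleepable path be converted later
without having to audit every caller again.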