Diffstat (limited to 'features/rt/rcutorture-Avoid-problematic-critical-section-nestin.patch')
-rw-r--r--  features/rt/rcutorture-Avoid-problematic-critical-section-nestin.patch  195
1 file changed, 195 insertions, 0 deletions
diff --git a/features/rt/rcutorture-Avoid-problematic-critical-section-nestin.patch b/features/rt/rcutorture-Avoid-problematic-critical-section-nestin.patch
new file mode 100644
index 00000000..59d4b8a1
--- /dev/null
+++ b/features/rt/rcutorture-Avoid-problematic-critical-section-nestin.patch
@@ -0,0 +1,195 @@
+From 46cd674891358a7e6bf74242122b648fb85f6bcc Mon Sep 17 00:00:00 2001
+From: Scott Wood <swood@redhat.com>
+Date: Wed, 11 Sep 2019 17:57:29 +0100
+Subject: [PATCH 132/191] rcutorture: Avoid problematic critical section
+ nesting on RT
+
+rcutorture was generating some nesting scenarios that are not
+reasonable. Constrain the state selection to avoid them.
+
+Example #1:
+
+1. preempt_disable()
+2. local_bh_disable()
+3. preempt_enable()
+4. local_bh_enable()
+
+On PREEMPT_RT, BH disabling takes a local lock only when called in
+non-atomic context. Thus, atomic context must be retained until after BH
+is re-enabled. Likewise, if BH is initially disabled in non-atomic
+context, it cannot be re-enabled in atomic context.
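+
+For illustration, a minimal sketch (assuming the PREEMPT_RT local-lock
+semantics described above) of the two orderings that remain legal, with
+the BH-disabled section either entirely inside or entirely outside the
+atomic section:
+
+	/* BH section fully inside the atomic section: */
+	preempt_disable();
+	local_bh_disable();	/* atomic context: no local lock taken */
+	local_bh_enable();	/* atomic context: no local lock to drop */
+	preempt_enable();
+
+	/* BH section fully enclosing the atomic section: */
+	local_bh_disable();	/* non-atomic: takes the local lock */
+	preempt_disable();
+	preempt_enable();
+	local_bh_enable();	/* non-atomic: releases the local lock */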
+
+Example #2:
+
+1. rcu_read_lock()
+2. local_irq_disable()
+3. rcu_read_unlock()
+4. local_irq_enable()
+
+If the thread is preempted between steps 1 and 2,
+rcu_read_unlock_special.b.blocked will be set, but it won't be
+acted on in step 3 because IRQs are disabled. Thus, reporting of the
+quiescent state will be delayed beyond the local_irq_enable().
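+
+For illustration, a minimal sketch of a safe variant: the outermost
+rcu_read_unlock() runs with IRQs enabled, so any quiescent state
+deferred by the preemption can be reported immediately:
+
+	rcu_read_lock();
+	local_irq_disable();
+	local_irq_enable();
+	rcu_read_unlock();	/* IRQs on: deferred QS reported here */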
+
+For now, these scenarios will continue to be tested on non-PREEMPT_RT
+kernels, until debug checks are added to ensure that they are not
+happening elsewhere.
+
+Signed-off-by: Scott Wood <swood@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/rcu/rcutorture.c | 97 +++++++++++++++++++++++++++++++++++------
+ 1 file changed, 83 insertions(+), 14 deletions(-)
+
+diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
+index 99657ffa6688..d41a13e09185 100644
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -61,10 +61,13 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@
+ #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */
+ #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */
+ #define RCUTORTURE_RDR_RCU 0x20 /* ... entering another RCU reader. */
+-#define RCUTORTURE_RDR_NBITS 6 /* Number of bits defined above. */
++#define RCUTORTURE_RDR_ATOM_BH 0x40 /* ... disabling bh while atomic */
++#define RCUTORTURE_RDR_ATOM_RBH 0x80 /* ... RBH while atomic */
++#define RCUTORTURE_RDR_NBITS 8 /* Number of bits defined above. */
+ #define RCUTORTURE_MAX_EXTEND \
+ (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
+- RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
++ RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED | \
++ RCUTORTURE_RDR_ATOM_BH | RCUTORTURE_RDR_ATOM_RBH)
+ #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */
+ /* Must be power of two minus one. */
+ #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
+@@ -1418,31 +1421,53 @@ static void rcutorture_one_extend(int *readstate, int newstate,
+ WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
+ rtrsp->rt_readstate = newstate;
+
+- /* First, put new protection in place to avoid critical-section gap. */
++ /*
++ * First, put new protection in place to avoid critical-section gap.
++ * Disable preemption around the ATOM disables to ensure that
++ * in_atomic() is true.
++ */
+ if (statesnew & RCUTORTURE_RDR_BH)
+ local_bh_disable();
++ if (statesnew & RCUTORTURE_RDR_RBH)
++ rcu_read_lock_bh();
+ if (statesnew & RCUTORTURE_RDR_IRQ)
+ local_irq_disable();
+ if (statesnew & RCUTORTURE_RDR_PREEMPT)
+ preempt_disable();
+- if (statesnew & RCUTORTURE_RDR_RBH)
+- rcu_read_lock_bh();
+ if (statesnew & RCUTORTURE_RDR_SCHED)
+ rcu_read_lock_sched();
++ preempt_disable();
++ if (statesnew & RCUTORTURE_RDR_ATOM_BH)
++ local_bh_disable();
++ if (statesnew & RCUTORTURE_RDR_ATOM_RBH)
++ rcu_read_lock_bh();
++ preempt_enable();
+ if (statesnew & RCUTORTURE_RDR_RCU)
+ idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
+
+- /* Next, remove old protection, irq first due to bh conflict. */
++ /*
++ * Next, remove old protection, in decreasing order of strength
++ * to avoid unlock paths that aren't safe in the stronger
++ * context. Disable preemption around the ATOM enables in
++ * case the context was only atomic due to IRQ disabling.
++ */
++ preempt_disable();
+ if (statesold & RCUTORTURE_RDR_IRQ)
+ local_irq_enable();
+- if (statesold & RCUTORTURE_RDR_BH)
++ if (statesold & RCUTORTURE_RDR_ATOM_BH)
+ local_bh_enable();
++ if (statesold & RCUTORTURE_RDR_ATOM_RBH)
++ rcu_read_unlock_bh();
++ preempt_enable();
+ if (statesold & RCUTORTURE_RDR_PREEMPT)
+ preempt_enable();
+- if (statesold & RCUTORTURE_RDR_RBH)
+- rcu_read_unlock_bh();
+ if (statesold & RCUTORTURE_RDR_SCHED)
+ rcu_read_unlock_sched();
++ if (statesold & RCUTORTURE_RDR_BH)
++ local_bh_enable();
++ if (statesold & RCUTORTURE_RDR_RBH)
++ rcu_read_unlock_bh();
++
+ if (statesold & RCUTORTURE_RDR_RCU) {
+ bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
+
+@@ -1485,6 +1510,12 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
+ int mask = rcutorture_extend_mask_max();
+ unsigned long randmask1 = torture_random(trsp) >> 8;
+ unsigned long randmask2 = randmask1 >> 3;
++ unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
++ unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
++ unsigned long nonatomic_bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
++ unsigned long atomic_bhs = RCUTORTURE_RDR_ATOM_BH |
++ RCUTORTURE_RDR_ATOM_RBH;
++ unsigned long tmp;
+
+ WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
+ /* Mostly only one bit (need preemption!), sometimes lots of bits. */
+@@ -1492,11 +1523,49 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
+ mask = mask & randmask2;
+ else
+ mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
+- /* Can't enable bh w/irq disabled. */
+- if ((mask & RCUTORTURE_RDR_IRQ) &&
+- ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
+- (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
+- mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
++
++ /*
++ * Can't enable bh w/irq disabled.
++ */
++ tmp = atomic_bhs | nonatomic_bhs;
++ if (mask & RCUTORTURE_RDR_IRQ)
++ mask |= oldmask & tmp;
++
++ /*
++ * Ideally these sequences would be detected in debug builds
++ * (regardless of RT), but until then don't stop testing
++ * them on non-RT.
++ */
++ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ /*
++ * Can't release the outermost rcu lock in an irq disabled
++ * section without preemption also being disabled, if irqs
++ * had ever been enabled during this RCU critical section
++ * (could leak a special flag and delay reporting the qs).
++ */
++ if ((oldmask & RCUTORTURE_RDR_RCU) &&
++ (mask & RCUTORTURE_RDR_IRQ) &&
++ !(mask & preempts))
++ mask |= RCUTORTURE_RDR_RCU;
++
++ /* Can't modify atomic bh in non-atomic context */
++ if ((oldmask & atomic_bhs) && (mask & atomic_bhs) &&
++ !(mask & preempts_irq)) {
++ mask |= oldmask & preempts_irq;
++ if (mask & RCUTORTURE_RDR_IRQ)
++ mask |= oldmask & tmp;
++ }
++ if ((mask & atomic_bhs) && !(mask & preempts_irq))
++ mask |= RCUTORTURE_RDR_PREEMPT;
++
++ /* Can't modify non-atomic bh in atomic context */
++ tmp = nonatomic_bhs;
++ if (oldmask & preempts_irq)
++ mask &= ~tmp;
++ if ((oldmask | mask) & preempts_irq)
++ mask |= oldmask & tmp;
++ }
++
+ return mask ?: RCUTORTURE_RDR_RCU;
+ }
+
+--
+2.19.1
+