path: root/features/rt/softirq-Check-preemption-after-reenabling-interrupts.patch
Diffstat (limited to 'features/rt/softirq-Check-preemption-after-reenabling-interrupts.patch')
-rw-r--r--  features/rt/softirq-Check-preemption-after-reenabling-interrupts.patch  150
1 file changed, 150 insertions(+), 0 deletions(-)
diff --git a/features/rt/softirq-Check-preemption-after-reenabling-interrupts.patch b/features/rt/softirq-Check-preemption-after-reenabling-interrupts.patch
new file mode 100644
index 00000000..21b60936
--- /dev/null
+++ b/features/rt/softirq-Check-preemption-after-reenabling-interrupts.patch
@@ -0,0 +1,150 @@
+From 601c56cd3ae5a8d6c944ec0b378e10877f102bdc Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 13 Nov 2011 17:17:09 +0100
+Subject: [PATCH 124/191] softirq: Check preemption after reenabling interrupts
+
+raise_softirq_irqoff() disables interrupts and wakes the softirq
+daemon, but after reenabling interrupts there is no preemption check,
+so the execution of the softirq thread might be delayed arbitrarily.
+
+In principle we could add that check to local_irq_enable/restore, but
+that's overkill as the raise_softirq_irqoff() sections are the only
+ones which show this behaviour.
+
+Reported-by: Carsten Emde <cbe@osadl.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/preempt.h | 3 +++
+ lib/irq_poll.c | 5 +++++
+ net/core/dev.c | 7 +++++++
+ 3 files changed, 15 insertions(+)
+
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 5ceac863e729..fb140e00f74d 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -190,8 +190,10 @@ do { \
+
+ #ifdef CONFIG_PREEMPT_RT
+ # define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++# define preempt_check_resched_rt() preempt_check_resched()
+ #else
+ # define preempt_enable_no_resched() preempt_enable()
++# define preempt_check_resched_rt() barrier();
+ #endif
+
+ #define preemptible() (preempt_count() == 0 && !irqs_disabled())
+@@ -262,6 +264,7 @@ do { \
+ #define preempt_disable_notrace() barrier()
+ #define preempt_enable_no_resched_notrace() barrier()
+ #define preempt_enable_notrace() barrier()
++#define preempt_check_resched_rt() barrier()
+ #define preemptible() 0
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+diff --git a/lib/irq_poll.c b/lib/irq_poll.c
+index 2f17b488d58e..7557bf7ecf1f 100644
+--- a/lib/irq_poll.c
++++ b/lib/irq_poll.c
+@@ -37,6 +37,7 @@ void irq_poll_sched(struct irq_poll *iop)
+ list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
+ raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(irq_poll_sched);
+
+@@ -72,6 +73,7 @@ void irq_poll_complete(struct irq_poll *iop)
+ local_irq_save(flags);
+ __irq_poll_complete(iop);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(irq_poll_complete);
+
+@@ -96,6 +98,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
+ }
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ /* Even though interrupts have been re-enabled, this
+ * access is safe because interrupts can only add new
+@@ -133,6 +136,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ /**
+@@ -196,6 +200,7 @@ static int irq_poll_cpu_dead(unsigned int cpu)
+ this_cpu_ptr(&blk_cpu_iopoll));
+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ return 0;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 6c5967e80132..86a599a41062 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3060,6 +3060,7 @@ static void __netif_reschedule(struct Qdisc *q)
+ sd->output_queue_tailp = &q->next_sched;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ void __netif_schedule(struct Qdisc *q)
+@@ -3122,6 +3123,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+ __this_cpu_write(softnet_data.completion_queue, skb);
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__dev_kfree_skb_irq);
+
+@@ -4617,6 +4619,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+ rps_unlock(sd);
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+
+ atomic_long_inc(&skb->dev->rx_dropped);
+ kfree_skb(skb);
+@@ -6306,12 +6309,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+ sd->rps_ipi_list = NULL;
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ /* Send pending IPI's to kick RPS processing on remote cpus. */
+ net_rps_send_ipi(remsd);
+ } else
+ #endif
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
+@@ -6389,6 +6394,7 @@ void __napi_schedule(struct napi_struct *n)
+ local_irq_save(flags);
+ ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__napi_schedule);
+
+@@ -11144,6 +11150,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
+
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ #ifdef CONFIG_RPS
+ remsd = oldsd->rps_ipi_list;
+--
+2.19.1
+
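
For readers who want to see the shape of the change outside a kernel tree, below is a minimal user-space sketch of the pattern this patch introduces. Every function and flag in it is a stand-in invented for illustration (raise_softirq_irqoff(), local_irq_restore() and preempt_check_resched() are mocked, and need_resched_flag only approximates TIF_NEED_RESCHED); the one thing that mirrors the patch is the placement of preempt_check_resched_rt() immediately after interrupts are re-enabled. Build with -DCONFIG_PREEMPT_RT to exercise the RT branch of the macro; without it the call compiles down to a barrier.

/*
 * sketch.c - illustrative mock-up, not kernel code.
 * The stand-ins below only model the control flow: raise a softirq
 * while interrupts are off, re-enable interrupts, then check for
 * preemption so the woken softirq thread is not delayed until the
 * next unrelated preemption point.
 */
#include <stdio.h>
#include <stdbool.h>

static bool need_resched_flag;                     /* stand-in for TIF_NEED_RESCHED */

static void raise_softirq_irqoff(unsigned int nr)  /* stand-in: marks softirq pending, wakes ksoftirqd */
{
	need_resched_flag = true;
	printf("softirq %u raised, ksoftirqd woken\n", nr);
}

static void local_irq_restore(unsigned long flags) /* stand-in: re-enables interrupts */
{
	(void)flags;
	printf("interrupts re-enabled\n");
}

static void preempt_check_resched(void)            /* stand-in: reschedule if needed */
{
	if (need_resched_flag) {
		need_resched_flag = false;
		printf("preemption point hit, softirq thread runs now\n");
	}
}

#ifdef CONFIG_PREEMPT_RT
# define preempt_check_resched_rt()	preempt_check_resched()
#else
# define barrier()			do { } while (0)
# define preempt_check_resched_rt()	barrier()
#endif

int main(void)
{
	unsigned long flags = 0;

	raise_softirq_irqoff(0);           /* 0 is a placeholder softirq number */
	local_irq_restore(flags);
	preempt_check_resched_rt();        /* the call the patch adds at this point */
	return 0;
}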