Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--  kernel/softirq.c  |  706
1 file changed, 610 insertions(+), 96 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 6f584861d329..c15583162a55 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -21,11 +21,14 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
+#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
+#include <linux/locallock.h>
#include <linux/irq.h>
+#include <linux/sched/types.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
@@ -56,12 +59,108 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ))
+DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd);
+#endif
const char * const softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
"TASKLET", "SCHED", "HRTIMER", "RCU"
};
+#ifdef CONFIG_NO_HZ_COMMON
+# ifdef CONFIG_PREEMPT_RT_FULL
+
+struct softirq_runner {
+ struct task_struct *runner[NR_SOFTIRQS];
+};
+
+static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
+
+static inline void softirq_set_runner(unsigned int sirq)
+{
+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
+
+ sr->runner[sirq] = current;
+}
+
+static inline void softirq_clr_runner(unsigned int sirq)
+{
+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
+
+ sr->runner[sirq] = NULL;
+}
+
+/*
+ * On preempt-rt a softirq running context might be blocked on a
+ * lock. There might be no other runnable task on this CPU because the
+ * lock owner runs on some other CPU. So we have to go into idle with
+ * the pending bit set. Therefore we need to check this here; otherwise
+ * we would warn about false positives, which confuses users and
+ * defeats the whole purpose of this test.
+ *
+ * This code is called with interrupts disabled.
+ */
+void softirq_check_pending_idle(void)
+{
+ static int rate_limit;
+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
+ u32 warnpending;
+ int i;
+
+ if (rate_limit >= 10)
+ return;
+
+ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
+ for (i = 0; i < NR_SOFTIRQS; i++) {
+ struct task_struct *tsk = sr->runner[i];
+
+ /*
+ * The wakeup code in rtmutex.c wakes up the task
+ * _before_ it sets pi_blocked_on to NULL under
+ * tsk->pi_lock. So we need to check for both: state
+ * and pi_blocked_on.
+ */
+ if (tsk) {
+ raw_spin_lock(&tsk->pi_lock);
+ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
+ /* Clear all bits pending in that task */
+ warnpending &= ~(tsk->softirqs_raised);
+ warnpending &= ~(1 << i);
+ }
+ raw_spin_unlock(&tsk->pi_lock);
+ }
+ }
+
+ if (warnpending) {
+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+ warnpending);
+ rate_limit++;
+ }
+}
+# else
+/*
+ * On !PREEMPT_RT we just do a rate limited printk:
+ */
+void softirq_check_pending_idle(void)
+{
+ static int rate_limit;
+
+ if (rate_limit < 10 &&
+ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+ local_softirq_pending());
+ rate_limit++;
+ }
+}
+# endif
+
+#else /* !CONFIG_NO_HZ_COMMON */
+static inline void softirq_set_runner(unsigned int sirq) { }
+static inline void softirq_clr_runner(unsigned int sirq) { }
+#endif
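The filtering rule in softirq_check_pending_idle() is easiest to see in isolation: a pending bit only triggers the NOHZ warning if no runner for it is runnable or blocked on a lock. A minimal user-space model of just that rule (plain C, hypothetical names, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	struct runner {
		bool exists;
		bool runnable_or_blocked;	/* TASK_RUNNING or pi_blocked_on set */
		unsigned int softirqs_raised;
	};

	static unsigned int filter_warnpending(unsigned int pending,
					       const struct runner r[], int n)
	{
		unsigned int warn = pending;

		for (int i = 0; i < n; i++) {
			if (r[i].exists && r[i].runnable_or_blocked)
				warn &= ~(r[i].softirqs_raised | (1u << i));
		}
		return warn;
	}

	int main(void)
	{
		struct runner r[10] = {
			[1] = { true, true, 1u << 1 },	/* TIMER runner is lock-blocked */
		};

		/* TIMER (bit 1) is excused, NET_RX (bit 3) still warns: prints 0x8 */
		printf("warnpending 0x%x\n",
		       filter_warnpending((1u << 1) | (1u << 3), r, 10));
		return 0;
	}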
+
/*
* we cannot loop indefinitely here to avoid userspace starvation,
* but we also don't want to introduce a worst case 1/HZ latency
@@ -77,6 +176,38 @@ static void wakeup_softirqd(void)
wake_up_process(tsk);
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void wakeup_timer_softirqd(void)
+{
+ /* Interrupts are disabled: no need to stop preemption */
+ struct task_struct *tsk = __this_cpu_read(ktimer_softirqd);
+
+ if (tsk && tsk->state != TASK_RUNNING)
+ wake_up_process(tsk);
+}
+#endif
+
+static void handle_softirq(unsigned int vec_nr)
+{
+ struct softirq_action *h = softirq_vec + vec_nr;
+ int prev_count;
+
+ prev_count = preempt_count();
+
+ kstat_incr_softirqs_this_cpu(vec_nr);
+
+ trace_softirq_entry(vec_nr);
+ h->action(h);
+ trace_softirq_exit(vec_nr);
+ if (unlikely(prev_count != preempt_count())) {
+ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+ vec_nr, softirq_to_name[vec_nr], h->action,
+ prev_count, preempt_count());
+ preempt_count_set(prev_count);
+ }
+}
+
+#ifndef CONFIG_PREEMPT_RT_FULL
/*
* If ksoftirqd is scheduled, we do not want to process pending softirqs
* right now. Let ksoftirqd handle this at its own rate, to get fairness,
@@ -92,6 +223,47 @@ static bool ksoftirqd_running(unsigned long pending)
return tsk && (tsk->state == TASK_RUNNING);
}
+static inline int ksoftirqd_softirq_pending(void)
+{
+ return local_softirq_pending();
+}
+
+static void handle_pending_softirqs(u32 pending)
+{
+ struct softirq_action *h = softirq_vec;
+ int softirq_bit;
+
+ local_irq_enable();
+
+ while ((softirq_bit = ffs(pending))) {
+ unsigned int vec_nr;
+
+ h += softirq_bit - 1;
+ vec_nr = h - softirq_vec;
+ handle_softirq(vec_nr);
+
+ h++;
+ pending >>= softirq_bit;
+ }
+
+ rcu_bh_qs();
+ local_irq_disable();
+}
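The ffs() walk above advances the vector pointer and shifts the pending mask in lockstep, so each set bit is visited exactly once, lowest first. A self-contained user-space model of the same walk (illustrative, not kernel code):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	static const char * const names[] = {
		"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
		"TASKLET", "SCHED", "HRTIMER", "RCU"
	};

	int main(void)
	{
		unsigned int pending = (1U << 1) | (1U << 3) | (1U << 9);
		unsigned int base = 0;	/* index of bit 0 of 'pending' */
		int bit;

		while ((bit = ffs(pending))) {
			unsigned int vec_nr = base + bit - 1;

			printf("run softirq %u (%s)\n", vec_nr, names[vec_nr]);
			base += bit;		/* mirrors h++ after h += bit - 1 */
			pending >>= bit;	/* drop the bit just handled */
		}
		return 0;
	}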
+
+static void run_ksoftirqd(unsigned int cpu)
+{
+ local_irq_disable();
+ if (ksoftirqd_softirq_pending()) {
+ __do_softirq();
+ local_irq_enable();
+ cond_resched();
+ return;
+ }
+ local_irq_enable();
+}
+
/*
* preempt_count and SOFTIRQ_OFFSET usage:
* - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
@@ -251,10 +423,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
int max_restart = MAX_SOFTIRQ_RESTART;
- struct softirq_action *h;
bool in_hardirq;
__u32 pending;
- int softirq_bit;
/*
* Mask out PF_MEMALLOC as current task context is borrowed for the
@@ -273,36 +443,7 @@ restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
- local_irq_enable();
-
- h = softirq_vec;
-
- while ((softirq_bit = ffs(pending))) {
- unsigned int vec_nr;
- int prev_count;
-
- h += softirq_bit - 1;
-
- vec_nr = h - softirq_vec;
- prev_count = preempt_count();
-
- kstat_incr_softirqs_this_cpu(vec_nr);
-
- trace_softirq_entry(vec_nr);
- h->action(h);
- trace_softirq_exit(vec_nr);
- if (unlikely(prev_count != preempt_count())) {
- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
- vec_nr, softirq_to_name[vec_nr], h->action,
- prev_count, preempt_count());
- preempt_count_set(prev_count);
- }
- h++;
- pending >>= softirq_bit;
- }
-
- rcu_bh_qs();
- local_irq_disable();
+ handle_pending_softirqs(pending);
pending = local_softirq_pending();
if (pending) {
@@ -339,6 +480,309 @@ asmlinkage __visible void do_softirq(void)
}
/*
+ * This function must run with irqs disabled!
+ */
+void raise_softirq_irqoff(unsigned int nr)
+{
+ __raise_softirq_irqoff(nr);
+
+ /*
+ * If we're in an interrupt or softirq, we're done
+ * (this also catches softirq-disabled code). We will
+ * actually run the softirq once we return from
+ * the irq or softirq.
+ *
+ * Otherwise we wake up ksoftirqd to make sure we
+ * schedule the softirq soon.
+ */
+ if (!in_interrupt())
+ wakeup_softirqd();
+}
+
+void __raise_softirq_irqoff(unsigned int nr)
+{
+ trace_softirq_raise(nr);
+ or_softirq_pending(1UL << nr);
+}
+
+static inline void local_bh_disable_nort(void) { local_bh_disable(); }
+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
+static void ksoftirqd_set_sched_params(unsigned int cpu) { }
+
+#else /* CONFIG_PREEMPT_RT_FULL */
+
+/*
+ * On RT we serialize softirq execution with a cpu local lock per softirq
+ */
+static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
+
+void __init softirq_early_init(void)
+{
+ int i;
+
+ for (i = 0; i < NR_SOFTIRQS; i++)
+ local_irq_lock_init(local_softirq_locks[i]);
+}
+
+static void lock_softirq(int which)
+{
+ local_lock(local_softirq_locks[which]);
+}
+
+static void unlock_softirq(int which)
+{
+ local_unlock(local_softirq_locks[which]);
+}
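lock_softirq() takes a per-CPU, per-vector rtmutex-backed local lock, so a contended acquirer boosts the current owner instead of spinning. As a user-space analogy (illustrative names; link with -lpthread), the same priority-inheritance behaviour can be requested on a plain pthread mutex:

	#include <pthread.h>

	#define NR_SOFTIRQS 10

	static pthread_mutex_t softirq_locks[NR_SOFTIRQS];

	static void softirq_locks_init(void)
	{
		pthread_mutexattr_t attr;

		pthread_mutexattr_init(&attr);
		/* A contended waiter boosts the owner, as rtmutexes do in-kernel */
		pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
		for (int i = 0; i < NR_SOFTIRQS; i++)
			pthread_mutex_init(&softirq_locks[i], &attr);
		pthread_mutexattr_destroy(&attr);
	}

	int main(void)
	{
		softirq_locks_init();
		pthread_mutex_lock(&softirq_locks[0]);	 /* lock_softirq(HI)   */
		pthread_mutex_unlock(&softirq_locks[0]); /* unlock_softirq(HI) */
		return 0;
	}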
+
+static void do_single_softirq(int which)
+{
+ unsigned long old_flags = current->flags;
+
+ current->flags &= ~PF_MEMALLOC;
+ vtime_account_irq_enter(current);
+ current->flags |= PF_IN_SOFTIRQ;
+ lockdep_softirq_enter();
+ local_irq_enable();
+ handle_softirq(which);
+ local_irq_disable();
+ lockdep_softirq_exit();
+ current->flags &= ~PF_IN_SOFTIRQ;
+ vtime_account_irq_exit(current);
+ current_restore_flags(old_flags, PF_MEMALLOC);
+}
+
+/*
+ * Called with interrupts disabled. Process softirqs which were raised
+ * in current context (or on behalf of ksoftirqd).
+ */
+static void do_current_softirqs(void)
+{
+ while (current->softirqs_raised) {
+ int i = __ffs(current->softirqs_raised);
+ unsigned int pending, mask = (1U << i);
+
+ current->softirqs_raised &= ~mask;
+ local_irq_enable();
+
+ /*
+ * If the lock is contended, we boost the owner to
+ * process the softirq or leave the critical section
+ * now.
+ */
+ lock_softirq(i);
+ local_irq_disable();
+ softirq_set_runner(i);
+ /*
+ * Check with the local_softirq_pending() bits,
+ * whether we need to process this still or if someone
+ * else took care of it.
+ */
+ pending = local_softirq_pending();
+ if (pending & mask) {
+ set_softirq_pending(pending & ~mask);
+ do_single_softirq(i);
+ }
+ softirq_clr_runner(i);
+ WARN_ON(current->softirq_nestcnt != 1);
+ local_irq_enable();
+ unlock_softirq(i);
+ local_irq_disable();
+ }
+}
+
+void __local_bh_disable(void)
+{
+ if (++current->softirq_nestcnt == 1)
+ migrate_disable();
+}
+EXPORT_SYMBOL(__local_bh_disable);
+
+void __local_bh_enable(void)
+{
+ if (WARN_ON(current->softirq_nestcnt == 0))
+ return;
+
+ local_irq_disable();
+ if (current->softirq_nestcnt == 1 && current->softirqs_raised)
+ do_current_softirqs();
+ local_irq_enable();
+
+ if (--current->softirq_nestcnt == 0)
+ migrate_enable();
+}
+EXPORT_SYMBOL(__local_bh_enable);
+
+void _local_bh_enable(void)
+{
+ if (WARN_ON(current->softirq_nestcnt == 0))
+ return;
+ if (--current->softirq_nestcnt == 0)
+ migrate_enable();
+}
+EXPORT_SYMBOL(_local_bh_enable);
+
+int in_serving_softirq(void)
+{
+ return current->flags & PF_IN_SOFTIRQ;
+}
+EXPORT_SYMBOL(in_serving_softirq);
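Because softirq_nestcnt counts nesting, only the outermost enable flushes softirqs raised while bh was disabled; inner enables just decrement. A tiny user-space model of that bookkeeping (illustrative; the interrupt disabling around the flush is omitted):

	#include <stdio.h>

	static int nestcnt;
	static unsigned int raised;

	static void bh_disable(void) { nestcnt++; }

	static void bh_enable(void)
	{
		/* Flush only at the outermost level, before decrementing */
		if (nestcnt == 1 && raised) {
			printf("flush pending 0x%x\n", raised);
			raised = 0;
		}
		nestcnt--;
	}

	int main(void)
	{
		bh_disable();
		bh_disable();		/* nested section              */
		raised |= 1U << 3;	/* raise NET_RX while disabled */
		bh_enable();		/* inner enable: no flush yet  */
		bh_enable();		/* outermost enable flushes    */
		return 0;
	}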
+
+/* Called with preemption disabled */
+static void run_ksoftirqd(unsigned int cpu)
+{
+ local_irq_disable();
+ current->softirq_nestcnt++;
+
+ do_current_softirqs();
+ current->softirq_nestcnt--;
+ local_irq_enable();
+ cond_resched();
+}
+
+/*
+ * Called from netif_rx_ni(). Preemption enabled, but migration
+ * disabled. So the cpu can't go away under us.
+ */
+void thread_do_softirq(void)
+{
+ if (!in_serving_softirq() && current->softirqs_raised) {
+ current->softirq_nestcnt++;
+ do_current_softirqs();
+ current->softirq_nestcnt--;
+ }
+}
+
+static void do_raise_softirq_irqoff(unsigned int nr)
+{
+ unsigned int mask;
+
+ mask = 1UL << nr;
+
+ trace_softirq_raise(nr);
+ or_softirq_pending(mask);
+
+ /*
+ * If we are not in a hard interrupt and inside a bh disabled
+ * region, we simply raise the flag on current. local_bh_enable()
+ * will make sure that the softirq is executed. Otherwise we
+ * delegate it to ksoftirqd.
+ */
+ if (!in_irq() && current->softirq_nestcnt)
+ current->softirqs_raised |= mask;
+ else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd))
+ return;
+
+ if (mask & TIMER_SOFTIRQS)
+ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
+ else
+ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
+}
+
+static void wakeup_proper_softirq(unsigned int nr)
+{
+ if ((1UL << nr) & TIMER_SOFTIRQS)
+ wakeup_timer_softirqd();
+ else
+ wakeup_softirqd();
+}
+
+void __raise_softirq_irqoff(unsigned int nr)
+{
+ do_raise_softirq_irqoff(nr);
+ if (!in_irq() && !current->softirq_nestcnt)
+ wakeup_proper_softirq(nr);
+}
+
+/*
+ * Same as __raise_softirq_irqoff() but will process them in ksoftirqd
+ */
+void __raise_softirq_irqoff_ksoft(unsigned int nr)
+{
+ unsigned int mask;
+
+ if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) ||
+ !__this_cpu_read(ktimer_softirqd)))
+ return;
+ mask = 1UL << nr;
+
+ trace_softirq_raise(nr);
+ or_softirq_pending(mask);
+ if (mask & TIMER_SOFTIRQS)
+ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
+ else
+ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
+ wakeup_proper_softirq(nr);
+}
+
+/*
+ * This function must run with irqs disabled!
+ */
+void raise_softirq_irqoff(unsigned int nr)
+{
+ do_raise_softirq_irqoff(nr);
+
+ /*
+ * If we're in a hard interrupt we let the irq return code deal
+ * with the wakeup of ksoftirqd.
+ */
+ if (in_irq())
+ return;
+ /*
+ * If we are in thread context but outside of a bh disabled
+ * region, we need to wake ksoftirqd as well.
+ *
+ * CHECKME: Some of the places which do that could be wrapped
+ * into local_bh_disable/enable pairs. Though it's unclear
+ * whether this is worth the effort. To find those places just
+ * raise a WARN() if the condition is met.
+ */
+ if (!current->softirq_nestcnt)
+ wakeup_proper_softirq(nr);
+}
+
+static inline int ksoftirqd_softirq_pending(void)
+{
+ return current->softirqs_raised;
+}
+
+static inline void local_bh_disable_nort(void) { }
+static inline void _local_bh_enable_nort(void) { }
+
+static inline void ksoftirqd_set_sched_params(unsigned int cpu)
+{
+ /* Take over all pending softirqs except the timer ones when starting */
+ local_irq_disable();
+ current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS;
+ local_irq_enable();
+}
+
+static inline void ktimer_softirqd_set_sched_params(unsigned int cpu)
+{
+ struct sched_param param = { .sched_priority = 1 };
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
+
+ /* Take over timer pending softirqs when starting */
+ local_irq_disable();
+ current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS;
+ local_irq_enable();
+}
+
+static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu,
+ bool online)
+{
+ struct sched_param param = { .sched_priority = 0 };
+
+ sched_setscheduler(current, SCHED_NORMAL, &param);
+}
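ktimer_softirqd_set_sched_params() boosts the timer softirq thread to SCHED_FIFO priority 1, and the cleanup path drops it back to SCHED_NORMAL. The equivalent calls from user space look like this (minimal sketch; SCHED_OTHER is the user-space name for SCHED_NORMAL, and CAP_SYS_NICE is required):

	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		struct sched_param param = { .sched_priority = 1 };

		/* Mirror ktimer_softirqd_set_sched_params(): FIFO prio 1 */
		if (sched_setscheduler(0 /* self */, SCHED_FIFO, &param))
			perror("sched_setscheduler");

		/* ... latency-sensitive work ... */

		/* Mirror the cleanup path: back to the normal class */
		param.sched_priority = 0;
		sched_setscheduler(0, SCHED_OTHER, &param);
		return 0;
	}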
+
+static int ktimer_softirqd_should_run(unsigned int cpu)
+{
+ return current->softirqs_raised;
+}
+
+#endif /* PREEMPT_RT_FULL */
+/*
* Enter an interrupt context.
*/
void irq_enter(void)
@@ -349,9 +793,9 @@ void irq_enter(void)
* Prevent raise_softirq from needlessly waking up ksoftirqd
* here, as softirq will be serviced on return from interrupt.
*/
- local_bh_disable();
+ local_bh_disable_nort();
tick_irq_enter();
- _local_bh_enable();
+ _local_bh_enable_nort();
}
__irq_enter();
@@ -359,6 +803,7 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
+#ifndef CONFIG_PREEMPT_RT_FULL
if (ksoftirqd_running(local_softirq_pending()))
return;
@@ -381,6 +826,18 @@ static inline void invoke_softirq(void)
} else {
wakeup_softirqd();
}
+#else /* PREEMPT_RT_FULL */
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (__this_cpu_read(ksoftirqd) &&
+ __this_cpu_read(ksoftirqd)->softirqs_raised)
+ wakeup_softirqd();
+ if (__this_cpu_read(ktimer_softirqd) &&
+ __this_cpu_read(ktimer_softirqd)->softirqs_raised)
+ wakeup_timer_softirqd();
+ local_irq_restore(flags);
+#endif
}
static inline void tick_irq_exit(void)
@@ -416,26 +873,6 @@ void irq_exit(void)
trace_hardirq_exit(); /* must be last! */
}
-/*
- * This function must run with irqs disabled!
- */
-inline void raise_softirq_irqoff(unsigned int nr)
-{
- __raise_softirq_irqoff(nr);
-
- /*
- * If we're in an interrupt or softirq, we're done
- * (this also catches softirq-disabled code). We will
- * actually run the softirq once we return from
- * the irq or softirq.
- *
- * Otherwise we wake up ksoftirqd to make sure we
- * schedule the softirq soon.
- */
- if (!in_interrupt())
- wakeup_softirqd();
-}
-
void raise_softirq(unsigned int nr)
{
unsigned long flags;
@@ -445,12 +882,6 @@ void raise_softirq(unsigned int nr)
local_irq_restore(flags);
}
-void __raise_softirq_irqoff(unsigned int nr)
-{
- trace_softirq_raise(nr);
- or_softirq_pending(1UL << nr);
-}
-
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
softirq_vec[nr].action = action;
@@ -475,11 +906,38 @@ static void __tasklet_schedule_common(struct tasklet_struct *t,
unsigned long flags;
local_irq_save(flags);
+ if (!tasklet_trylock(t)) {
+ local_irq_restore(flags);
+ return;
+ }
+
head = this_cpu_ptr(headp);
- t->next = NULL;
- *head->tail = t;
- head->tail = &(t->next);
- raise_softirq_irqoff(softirq_nr);
+again:
+ /* We may have been preempted before tasklet_trylock
+ * and __tasklet_action may have already run.
+ * So double check the SCHED bit while the tasklet
+ * is locked before adding it to the list.
+ */
+ if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
+ t->next = NULL;
+ *head->tail = t;
+ head->tail = &(t->next);
+ raise_softirq_irqoff(softirq_nr);
+ tasklet_unlock(t);
+ } else {
+ /* This is subtle. If we hit the corner case above,
+ * it is possible that we get preempted right here,
+ * and another task has successfully called
+ * tasklet_schedule(), then this function, and
+ * failed on the trylock. Thus we must be sure,
+ * before releasing the tasklet lock, that the
+ * SCHED bit is clear. Otherwise the tasklet
+ * may get its SCHED bit set but never be added
+ * to the list.
+ */
+ if (!tasklet_tryunlock(t))
+ goto again;
+ }
local_irq_restore(flags);
}
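The retry loop above relies on tasklet_tryunlock() failing whenever another state bit (such as SCHED) appeared while the tasklet was locked, which is what forces re-queueing instead of losing a schedule. A user-space model of that protocol with C11 atomics (illustrative, not the kernel's actual implementation):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define TASKLET_STATE_SCHED	(1u << 0)
	#define TASKLET_STATE_RUN	(1u << 1)

	static bool tasklet_trylock(atomic_uint *state)
	{
		/* Set RUN; succeed only if it was previously clear */
		return !(atomic_fetch_or(state, TASKLET_STATE_RUN) &
			 TASKLET_STATE_RUN);
	}

	static bool tasklet_tryunlock(atomic_uint *state)
	{
		unsigned int expected = TASKLET_STATE_RUN;

		/* Drop RUN only if no other bit (e.g. SCHED) appeared meanwhile */
		return atomic_compare_exchange_strong(state, &expected, 0u);
	}

	int main(void)
	{
		atomic_uint state = 0;

		tasklet_trylock(&state);			/* RUN set     */
		atomic_fetch_or(&state, TASKLET_STATE_SCHED);	/* rescheduled */
		printf("tryunlock: %d\n", tasklet_tryunlock(&state)); /* 0: retry */
		return 0;
	}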
@@ -497,11 +955,21 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
+void tasklet_enable(struct tasklet_struct *t)
+{
+ if (!atomic_dec_and_test(&t->count))
+ return;
+ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
+ tasklet_schedule(t);
+}
+EXPORT_SYMBOL(tasklet_enable);
+
static void tasklet_action_common(struct softirq_action *a,
struct tasklet_head *tl_head,
unsigned int softirq_nr)
{
struct tasklet_struct *list;
+ int loops = 1000000;
local_irq_disable();
list = tl_head->head;
@@ -513,25 +981,56 @@ static void tasklet_action_common(struct softirq_action *a,
struct tasklet_struct *t = list;
list = list->next;
+ /*
+ * Should always succeed - after a tasklet got on the
+ * list (after getting the SCHED bit set from 0 to 1),
+ * nothing but the tasklet softirq it got queued to can
+ * lock it:
+ */
+ if (!tasklet_trylock(t)) {
+ WARN_ON(1);
+ continue;
+ }
+
+ t->next = NULL;
- if (tasklet_trylock(t)) {
- if (!atomic_read(&t->count)) {
- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
- &t->state))
- BUG();
- t->func(t->data);
+ if (unlikely(atomic_read(&t->count))) {
+out_disabled:
+ /* implicit unlock: */
+ wmb();
+ t->state = TASKLET_STATEF_PENDING;
+ continue;
+ }
+ /*
+ * After this point on the tasklet might be rescheduled
+ * on another CPU, but it can only be added to another
+ * CPU's tasklet list if we unlock the tasklet (which we
+ * dont do yet).
+ */
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ WARN_ON(1);
+again:
+ t->func(t->data);
+
+ while (!tasklet_tryunlock(t)) {
+ /*
+ * If it got disabled meanwhile, bail out:
+ */
+ if (atomic_read(&t->count))
+ goto out_disabled;
+ /*
+ * If it got scheduled meanwhile, re-execute
+ * the tasklet function:
+ */
+ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ goto again;
+ if (!--loops) {
+ printk("hm, tasklet state: %08lx\n", t->state);
+ WARN_ON(1);
tasklet_unlock(t);
- continue;
+ break;
}
- tasklet_unlock(t);
}
-
- local_irq_disable();
- t->next = NULL;
- *tl_head->tail = t;
- tl_head->tail = &t->next;
- __raise_softirq_irqoff(softirq_nr);
- local_irq_enable();
}
}
@@ -563,7 +1062,7 @@ void tasklet_kill(struct tasklet_struct *t)
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
do {
- yield();
+ msleep(1);
} while (test_bit(TASKLET_STATE_SCHED, &t->state));
}
tasklet_unlock_wait(t);
@@ -637,25 +1136,26 @@ void __init softirq_init(void)
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
-static int ksoftirqd_should_run(unsigned int cpu)
-{
- return local_softirq_pending();
-}
-
-static void run_ksoftirqd(unsigned int cpu)
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+void tasklet_unlock_wait(struct tasklet_struct *t)
{
- local_irq_disable();
- if (local_softirq_pending()) {
+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
/*
- * We can safely run softirq on inline stack, as we are not deep
- * in the task stack here.
+ * Hack for now to avoid this busy-loop:
*/
- __do_softirq();
- local_irq_enable();
- cond_resched();
- return;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ msleep(1);
+#else
+ barrier();
+#endif
}
- local_irq_enable();
+}
+EXPORT_SYMBOL(tasklet_unlock_wait);
+#endif
+
+static int ksoftirqd_should_run(unsigned int cpu)
+{
+ return ksoftirqd_softirq_pending();
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -722,17 +1222,31 @@ static int takeover_tasklets(unsigned int cpu)
static struct smp_hotplug_thread softirq_threads = {
.store = &ksoftirqd,
+ .setup = ksoftirqd_set_sched_params,
.thread_should_run = ksoftirqd_should_run,
.thread_fn = run_ksoftirqd,
.thread_comm = "ksoftirqd/%u",
};
+#ifdef CONFIG_PREEMPT_RT_FULL
+static struct smp_hotplug_thread softirq_timer_threads = {
+ .store = &ktimer_softirqd,
+ .setup = ktimer_softirqd_set_sched_params,
+ .cleanup = ktimer_softirqd_clr_sched_params,
+ .thread_should_run = ktimer_softirqd_should_run,
+ .thread_fn = run_ksoftirqd,
+ .thread_comm = "ktimersoftd/%u",
+};
+#endif
+
static __init int spawn_ksoftirqd(void)
{
cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
takeover_tasklets);
BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
-
+#ifdef CONFIG_PREEMPT_RT_FULL
+ BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads));
+#endif
return 0;
}
early_initcall(spawn_ksoftirqd);