Diffstat (limited to 'lib/nmi_backtrace.c')
 lib/nmi_backtrace.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 15ca78e1c7d4..33c154264bfe 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -34,7 +34,7 @@ static unsigned long backtrace_flag;
  * they are passed being updated as a side effect of this call.
  */
 void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
-				   bool exclude_self,
+				   int exclude_cpu,
 				   void (*raise)(cpumask_t *mask))
 {
 	int i, this_cpu = get_cpu();
@@ -49,8 +49,8 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
 	}
 
 	cpumask_copy(to_cpumask(backtrace_mask), mask);
-	if (exclude_self)
-		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
+	if (exclude_cpu != -1)
+		cpumask_clear_cpu(exclude_cpu, to_cpumask(backtrace_mask));
 
 	/*
 	 * Don't try to send an NMI to this cpu; it may work on some
@@ -64,6 +64,7 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
 	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
 		pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
 			this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
+		nmi_backtrace_stall_snap(to_cpumask(backtrace_mask));
 		raise(to_cpumask(backtrace_mask));
 	}
 
@@ -74,23 +75,34 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
 		mdelay(1);
 		touch_softlockup_watchdog();
 	}
+	nmi_backtrace_stall_check(to_cpumask(backtrace_mask));
 
 	/*
 	 * Force flush any remote buffers that might be stuck in IRQ context
 	 * and therefore could not run their irq_work.
 	 */
-	printk_safe_flush();
+	printk_trigger_flush();
 
 	clear_bit_unlock(0, &backtrace_flag);
 	put_cpu();
 }
 
+// Dump stacks even for idle CPUs.
+static bool backtrace_idle;
+module_param(backtrace_idle, bool, 0644);
+
 bool nmi_cpu_backtrace(struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
+	unsigned long flags;
 
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
-		if (regs && cpu_in_idle(instruction_pointer(regs))) {
+		/*
+		 * Allow nested NMI backtraces while serializing
+		 * against other CPUs.
+		 */
+		printk_cpu_sync_get_irqsave(flags);
+		if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
 			pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
 				cpu, (void *)instruction_pointer(regs));
 		} else {
@@ -100,6 +112,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 			else
 				dump_stack();
 		}
+		printk_cpu_sync_put_irqrestore(flags);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 		return true;
 	}
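
The switch from "bool exclude_self" to "int exclude_cpu" changes the calling convention: instead of a flag meaning "skip the calling CPU", callers now name one CPU to leave out of the backtrace, or pass -1 to target every CPU in the mask. Below is a minimal sketch of an adapted caller; arch_raise_nmi() is a hypothetical stand-in for the architecture-specific hook that actually delivers the NMI IPIs.

#include <linux/cpumask.h>
#include <linux/nmi.h>

/* Hypothetical hook: deliver an NMI to every CPU left in @mask. */
static void arch_raise_nmi(cpumask_t *mask)
{
	/* e.g. on x86 this would send NMI-vector IPIs to @mask */
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
	/*
	 * exclude_cpu == -1 backtraces all CPUs in @mask; any other
	 * value clears that single CPU from the mask before the raise()
	 * callback sends the IPIs.
	 */
	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, arch_raise_nmi);
}

The new backtrace_idle parameter defaults to false, preserving the old behavior of reporting CPUs caught in cpu_in_idle() as skipped rather than dumping their stacks. Because it is declared with mode 0644 it should also be adjustable at runtime, e.g. via /sys/module/nmi_backtrace/parameters/backtrace_idle or as nmi_backtrace.backtrace_idle=1 on the kernel command line, assuming the usual built-in module_param naming.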