@@ -767,7 +767,10 @@ void ipi_cpu_backtrace(struct pt_regs *regs)
/* Replace printk to write into the NMI seq */
this_cpu_write(printk_func, nmi_vprintk);
printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
- show_regs(regs);
+ if (regs != NULL)
+ show_regs(regs);
+ else
+ dump_stack();
this_cpu_write(printk_func, printk_func_save);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
@@ -812,6 +815,16 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE);
}
+ /*
+ * If irqs are disabled on the current processor then, if
+ * IPI_CPU_BACKTRACE is delivered using IRQ, we won't be able to
+ * react to IPI_CPU_BACKTRACE until we leave this function. We avoid
+ * the potential timeout (not to mention the failure to print useful
+ * information) by calling the backtrace directly.
+ */
+ if (include_self && irqs_disabled())
+ ipi_cpu_backtrace(in_interrupt() ? get_irq_regs() : NULL);
+
if (!cpumask_empty(to_cpumask(backtrace_mask))) {
pr_info("Sending NMI to %s CPUs:\n",
(include_self ? "all" : "other"));
Currently if arch_trigger_all_cpu_backtrace() is called with interrupts
disabled, and on a platform that delivers IPI_CPU_BACKTRACE using regular
IRQ requests, the system will wedge for ten seconds waiting for the
current CPU to react to a masked interrupt. This patch resolves the issue
by calling directly into the backtrace dump code instead of generating an
IPI.

Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
---
 arch/arm/kernel/smp.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
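
As context for the first hunk: regs is only non-NULL when
ipi_cpu_backtrace() runs as a genuine interrupt handler. When it is now
called synchronously from arch_trigger_all_cpu_backtrace() outside
interrupt context there is no saved exception frame, so the handler has
to fall back to walking the current stack. A minimal sketch of that
dispatch (not part of the patch; header placement assumes a tree of
this vintage, where show_regs() lives in linux/sched.h):

	#include <linux/printk.h>	/* dump_stack() */
	#include <linux/sched.h>	/* show_regs() */
	#include <asm/ptrace.h>		/* struct pt_regs */

	/* Hedged sketch of the dump dispatch added in hunk 1. */
	static void backtrace_dump_one(struct pt_regs *regs)
	{
		if (regs != NULL)
			show_regs(regs);	/* we have the interrupted context */
		else
			dump_stack();		/* synchronous call: walk our own stack */
	}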
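
And a hedged usage sketch of the failure mode the commit message
describes. The caller name is hypothetical; local_irq_save() comes from
linux/irqflags.h and arch_trigger_all_cpu_backtrace() is declared via
linux/nmi.h:

	#include <linux/irqflags.h>	/* local_irq_save()/local_irq_restore() */
	#include <linux/nmi.h>		/* arch_trigger_all_cpu_backtrace() */

	/* Hedged sketch, not part of the patch: a caller shape that wedged. */
	static void report_all_cpus_with_irqs_off(void)
	{
		unsigned long flags;

		local_irq_save(flags);

		/*
		 * On platforms that deliver IPI_CPU_BACKTRACE as a regular
		 * IRQ, this used to spin for ten seconds: the current CPU
		 * could not take its own (masked) IPI until IRQs were
		 * re-enabled. With this patch the current CPU dumps itself
		 * synchronously and the IPI only has to reach the others.
		 */
		arch_trigger_all_cpu_backtrace(true);

		local_irq_restore(flags);
	}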