Message ID: 20170420131154.GL3452@pathway.suse.cz (mailing list archive)
State: New, archived
On (04/20/17 15:11), Petr Mladek wrote:
[..]
>  void printk_nmi_enter(void)
>  {
> -	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
> +	/*
> +	 * The size of the extra per-CPU buffer is limited. Use it
> +	 * only when really needed.
> +	 */
> +	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK ||
> +	    raw_spin_is_locked(&logbuf_lock)) {

can we please have && here?

[..]
> diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
> index 4e8a30d1c22f..0bc0a3535a8a 100644
> --- a/lib/nmi_backtrace.c
> +++ b/lib/nmi_backtrace.c
> @@ -86,9 +86,11 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
>
>  bool nmi_cpu_backtrace(struct pt_regs *regs)
>  {
> +	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
>  	int cpu = smp_processor_id();
>
>  	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
> +		arch_spin_lock(&lock);
>  		if (regs && cpu_in_idle(instruction_pointer(regs))) {
>  			pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
>  				cpu, instruction_pointer(regs));
> @@ -99,6 +101,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
>  		else
>  			dump_stack();
>  		}
> +		arch_spin_unlock(&lock);
>  		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
>  		return true;
>  	}

can the nmi_backtrace part be a patch on its own?

	-ss
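The && request above changes when the size-limited per-CPU NMI buffer gets
used. As a minimal user-space sketch of the two variants (the mask name
mirrors the patch; printk_context and logbuf_locked are plain variables
standing in for the per-CPU context and for raw_spin_is_locked(&logbuf_lock)):

    #include <stdbool.h>
    #include <stdio.h>

    #define PRINTK_SAFE_CONTEXT_MASK 0x3fffffff

    static unsigned int printk_context; /* stand-in for the per-CPU value */
    static bool logbuf_locked;          /* stand-in for raw_spin_is_locked() */

    /* '||' as posted: the small buffer is used when EITHER this CPU is in
     * a printk-safe section OR logbuf_lock is held by anyone. */
    static bool use_nmi_buf_or(void)
    {
            return (printk_context & PRINTK_SAFE_CONTEXT_MASK) || logbuf_locked;
    }

    /* '&&' as requested: the small buffer is used only when the lock is
     * held while this CPU is inside a printk-safe section, i.e. when the
     * holder may be this very CPU and spinning from NMI would deadlock. */
    static bool use_nmi_buf_and(void)
    {
            return (printk_context & PRINTK_SAFE_CONTEXT_MASK) && logbuf_locked;
    }

    int main(void)
    {
            /* logbuf_lock held by another CPU, this CPU not in a safe
             * section: '||' still burns the small per-CPU buffer, while
             * '&&' keeps the roomier default path. */
            logbuf_locked = true;
            printf("||: %d  &&: %d\n", use_nmi_buf_or(), use_nmi_buf_and());
            return 0;
    }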
On Fri 2017-04-28 10:25:30, Sergey Senozhatsky wrote:
> On (04/20/17 15:11), Petr Mladek wrote:
> [..]
> >  void printk_nmi_enter(void)
> >  {
> > -	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
> > +	/*
> > +	 * The size of the extra per-CPU buffer is limited. Use it
> > +	 * only when really needed.
> > +	 */
> > +	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK ||
> > +	    raw_spin_is_locked(&logbuf_lock)) {
>
> can we please have && here?

OK, it sounds reasonable after all.

> [..]
> > diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
> > index 4e8a30d1c22f..0bc0a3535a8a 100644
> > --- a/lib/nmi_backtrace.c
> > +++ b/lib/nmi_backtrace.c
> > @@ -86,9 +86,11 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
> >
> >  bool nmi_cpu_backtrace(struct pt_regs *regs)
> >  {
> > +	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
> >  	int cpu = smp_processor_id();
> >
> >  	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
> > +		arch_spin_lock(&lock);
> >  		if (regs && cpu_in_idle(instruction_pointer(regs))) {
> >  			pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
> >  				cpu, instruction_pointer(regs));
> > @@ -99,6 +101,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
> >  		else
> >  			dump_stack();
> >  		}
> > +		arch_spin_unlock(&lock);
> >  		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
> >  		return true;
> >  	}
>
> can the nmi_backtrace part be a patch on its own?

I would prefer to keep it in the same patch. The backtrace from all CPUs
is completely unusable when all CPUs push to the global log buffer in
parallel. A single patch might save the hair of some poor bisectors.

Best Regards,
Petr
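The interleaving Petr describes is easy to reproduce outside the kernel.
A hedged demo, with pthreads standing in for CPUs and a POSIX spinlock
standing in for the patch's arch_spinlock_t; drop the lock/unlock pair and
the multi-line "backtraces" from different threads interleave (build with
cc -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_spinlock_t lock;     /* stands in for arch_spinlock_t */

    /* Each thread plays one CPU answering the backtrace request. */
    static void *fake_backtrace(void *arg)
    {
            long cpu = (long)arg;
            int frame;

            pthread_spin_lock(&lock);   /* remove this pair to see the mess */
            printf("NMI backtrace for cpu %ld\n", cpu);
            for (frame = 0; frame < 3; frame++)
                    printf("  [<cpu%ld>] frame #%d\n", cpu, frame);
            pthread_spin_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t tid[4];
            long i;

            pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
            for (i = 0; i < 4; i++)
                    pthread_create(&tid[i], NULL, fake_backtrace, (void *)i);
            for (i = 0; i < 4; i++)
                    pthread_join(tid[i], NULL);
            pthread_spin_destroy(&lock);
            return 0;
    }

Each printf() call stays whole on its own, so the breakage shows up as
backtrace blocks from different "CPUs" shuffled together, which is exactly
what makes the unserialized kernel output unusable.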
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 1db044f808b7..2a7d04049af4 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -18,12 +18,14 @@
 
 #ifdef CONFIG_PRINTK
 
-#define PRINTK_SAFE_CONTEXT_MASK	0x7fffffff
-#define PRINTK_NMI_CONTEXT_MASK	0x80000000
+#define PRINTK_SAFE_CONTEXT_MASK	 0x3fffffff
+#define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000
+#define PRINTK_NMI_CONTEXT_MASK		 0x80000000
 
 extern raw_spinlock_t logbuf_lock;
 
 __printf(1, 0) int vprintk_default(const char *fmt, va_list args);
+__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
 __printf(1, 0) int vprintk_func(const char *fmt, va_list args);
 void __printk_safe_enter(void);
 void __printk_safe_exit(void);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 2984fb0f0257..16b519927d35 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2691,16 +2691,13 @@ void wake_up_klogd(void)
 	preempt_enable();
 }
 
-int printk_deferred(const char *fmt, ...)
+int vprintk_deferred(const char *fmt, va_list args)
 {
-	va_list args;
 	int r;
 
-	preempt_disable();
-	va_start(args, fmt);
 	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
-	va_end(args);
 
+	preempt_disable();
 	__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
 	irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
 	preempt_enable();
@@ -2708,6 +2705,18 @@ int printk_deferred(const char *fmt, ...)
 	return r;
 }
 
+int printk_deferred(const char *fmt, ...)
+{
+	va_list args;
+	int r;
+
+	va_start(args, fmt);
+	r = vprintk_deferred(fmt, args);
+	va_end(args);
+
+	return r;
+}
+
 /*
  * printk rate limiting, lifted from the networking subsystem.
  *
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 033e50a7d706..c3d165bcde42 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -308,12 +308,23 @@ static int vprintk_nmi(const char *fmt, va_list args)
 
 void printk_nmi_enter(void)
 {
-	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
+	/*
+	 * The size of the extra per-CPU buffer is limited. Use it
+	 * only when really needed.
+	 */
+	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK ||
+	    raw_spin_is_locked(&logbuf_lock)) {
+		this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
+	} else {
+		this_cpu_or(printk_context, PRINTK_NMI_DEFERRED_CONTEXT_MASK);
+	}
 }
 
 void printk_nmi_exit(void)
 {
-	this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
+	this_cpu_and(printk_context,
+		     ~(PRINTK_NMI_CONTEXT_MASK |
+		       PRINTK_NMI_DEFERRED_CONTEXT_MASK));
 }
 
 #else
@@ -351,12 +362,22 @@ void __printk_safe_exit(void)
 
 __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 {
+	/* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
 	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
 		return vprintk_nmi(fmt, args);
 
+	/* Use extra buffer to prevent a recursion deadlock in safe mode. */
 	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
 		return vprintk_safe(fmt, args);
 
+	/*
+	 * Use the main logbuf when logbuf_lock is available in NMI.
+	 * But avoid calling console drivers that might have their own locks.
+	 */
+	if (this_cpu_read(printk_context) & PRINTK_NMI_DEFERRED_CONTEXT_MASK)
+		return vprintk_deferred(fmt, args);
+
+	/* No obstacles. */
 	return vprintk_default(fmt, args);
 }
 
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 4e8a30d1c22f..0bc0a3535a8a 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -86,9 +86,11 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
 
 bool nmi_cpu_backtrace(struct pt_regs *regs)
 {
+	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	int cpu = smp_processor_id();
 
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+		arch_spin_lock(&lock);
 		if (regs && cpu_in_idle(instruction_pointer(regs))) {
 			pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
 				cpu, instruction_pointer(regs));
@@ -99,6 +101,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 		else
 			dump_stack();
 		}
+		arch_spin_unlock(&lock);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 		return true;
 	}
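Taken together, the patch gives vprintk_func() a four-way dispatch. As a
self-contained model of that control flow (the flag values match the patch;
printk_context is a plain variable standing in for the per-CPU one, and the
vprintk_* bodies here are printf stubs, not the kernel implementations):

    #include <stdio.h>

    /* Flag values as in the patch. */
    #define PRINTK_SAFE_CONTEXT_MASK         0x3fffffff
    #define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000
    #define PRINTK_NMI_CONTEXT_MASK          0x80000000

    static unsigned int printk_context; /* stand-in for the per-CPU variable */

    /* Stubs only; in the kernel each path targets a different buffer. */
    static int vprintk_nmi(const char *s)      { return printf("nmi buf : %s", s); }
    static int vprintk_safe(const char *s)     { return printf("safe buf: %s", s); }
    static int vprintk_deferred(const char *s) { return printf("deferred: %s", s); }
    static int vprintk_default(const char *s)  { return printf("default : %s", s); }

    static int vprintk_func(const char *s)
    {
            /* In NMI with logbuf_lock unavailable: only the small per-CPU
             * buffer is safe. */
            if (printk_context & PRINTK_NMI_CONTEXT_MASK)
                    return vprintk_nmi(s);

            /* logbuf_lock held on this CPU: avoid the recursion deadlock. */
            if (printk_context & PRINTK_SAFE_CONTEXT_MASK)
                    return vprintk_safe(s);

            /* In NMI with logbuf_lock free: use the main buffer but defer
             * the console drivers, which may hold their own locks. */
            if (printk_context & PRINTK_NMI_DEFERRED_CONTEXT_MASK)
                    return vprintk_deferred(s);

            return vprintk_default(s);
    }

    int main(void)
    {
            printk_context = PRINTK_NMI_DEFERRED_CONTEXT_MASK;
            vprintk_func("hello from NMI\n"); /* prints via the deferred stub */
            return 0;
    }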