@@ -36,6 +36,7 @@
* were handled or when IRQs are blocked.
*/
DEFINE_PER_CPU(printk_func_t, printk_func) = vprintk_default;
+static int printk_nmi_irq_ready;
struct nmi_seq_buf {
atomic_t len; /* length of written data */
@@ -80,8 +81,11 @@ again:
goto again;
/* Get flushed in a more safe context. */
- if (add)
+ if (add && printk_nmi_irq_ready) {
+ /* Make sure that IRQ work is really initialized. */
+ smp_rmb();
irq_work_queue(&s->work);
+ }
return add;
}
@@ -187,6 +191,13 @@ void __init printk_nmi_init(void)
init_irq_work(&s->work, __printk_nmi_flush);
}
+
+ /* Make sure that IRQ works are initialized before enabling. */
+ smp_wmb();
+ printk_nmi_irq_ready = 1;
+
+ /* Flush pending messages that did not have scheduled IRQ works. */
+ printk_nmi_flush();
}
void printk_nmi_enter(void)
NMIs can happen at any time, including before the IRQ works used by the NMI-safe printk() are initialized. This patch makes sure that the NMI-safe printk() schedules an IRQ work only when the related structures are initialized. Any messages that accumulated before that point are flushed once the IRQ works have been initialized. Signed-off-by: Petr Mladek <pmladek@suse.com> --- kernel/printk/nmi.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-)