@@ -12,6 +12,16 @@ config DEBUG_RODATA
portion of the kernel code won't be covered by a TLB anymore.
If in doubt, say "N".
+config DEBUG_STACKOVERFLOW
+ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL
+ ---help---
+	  Say Y here if you want to check for overflows of the kernel,
+	  IRQ and exception stacks. This option will print detailed
+	  messages about stack usage whenever the free stack space
+	  drops below a certain limit.
+ If in doubt, say "N".
+
config DEBUG_STRICT_USER_COPY_CHECKS
bool "Strict copy size checks"
depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
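
Note that sysctl_panic_on_stackoverflow, consulted by the stack check in arch/parisc/kernel/irq.c below, only has an effect once it is exposed as a sysctl. On x86 the same flag is published as /proc/sys/kernel/panic_on_stackoverflow through kernel/sysctl.c; a minimal sketch of such a table entry, assuming the equivalent wiring is done for parisc (the guard and placement are assumptions, not part of this patch):

	{
		.procname	= "panic_on_stackoverflow",
		.data		= &sysctl_panic_on_stackoverflow,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},

With such an entry in place, "echo 1 > /proc/sys/kernel/panic_on_stackoverflow" upgrades the stack-overflow warning below into a panic.
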
@@ -10,6 +10,8 @@
#include <linux/cpumask.h>
#include <asm/types.h>
+#define __ARCH_HAS_DO_SOFTIRQ
+
#define NO_IRQ (-1)
#ifdef CONFIG_GSC
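
Defining __ARCH_HAS_DO_SOFTIRQ compiles out the generic do_softirq() in kernel/softirq.c, so the arch-private version added at the end of this patch is used instead. For reference, the generic fallback being replaced looks roughly like this (a sketch of the generic code path, not part of this patch):

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = local_softirq_pending();
	if (pending)
		__do_softirq();		/* runs on the current stack */
	local_irq_restore(flags);
}

The parisc version below is identical except that __do_softirq() is run on the per-cpu IRQ stack via execute_on_irq_stack().
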
@@ -20,8 +20,6 @@
#endif /* __ASSEMBLY__ */
-#define KERNEL_STACK_SIZE (4*PAGE_SIZE)
-
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
@@ -33,6 +31,8 @@
#endif
#define current_text_addr() ({ void *pc; current_ia(pc); pc; })
+#define get_current_sp(sp) __asm__("copy %%r30, %0" : "=r"(sp))
+
#define TASK_SIZE_OF(tsk) ((tsk)->thread.task_size)
#define TASK_SIZE TASK_SIZE_OF(current)
#define TASK_UNMAPPED_BASE (current->thread.map_base)
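
get_current_sp() copies general register %r30, the PA-RISC stack pointer, into a C lvalue. Because the parisc stack grows upward, current stack usage is simply sp minus the stack base. A usage sketch; stack_used() is a hypothetical helper, not part of this patch:

#include <linux/sched.h>	/* task_stack_page() */

static inline unsigned long stack_used(void)
{
	unsigned long sp;

	get_current_sp(sp);

	/* stack grows upward on parisc: usage = sp - base */
	return sp - (unsigned long) task_stack_page(current);
}
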
@@ -61,6 +61,20 @@
#ifndef __ASSEMBLY__
/*
+ * IRQ STACK - used for hardirq and softirq (bottom half) handlers
+ */
+#ifdef __KERNEL__
+
+#define IRQ_STACK_SIZE (4096 << 2) /* = 16k; TODO: use PAGE_SIZE instead of 4096 */
+
+union irq_stack_union {
+ unsigned long irq_stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+};
+
+DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+#endif /* __KERNEL__ */
+
+/*
* Data detected about CPUs at boot time which is the same for all CPU's.
* HP boxes are SMP - ie identical processors.
*
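
This declares one statically sized 16k IRQ stack per CPU; the matching DEFINE_PER_CPU is added to arch/parisc/kernel/irq.c below. Wrapping the array in a union follows the x86 irq_stack_union pattern and leaves room to overlay other per-cpu data at the stack base later. Since the stack grows upward, the base address of the per-cpu variable doubles as the initial stack pointer, which is exactly how execute_on_irq_stack() uses it:

	/* base of this CPU's IRQ stack == initial sp; stack grows up */
	unsigned long irq_stack =
		(unsigned long) &per_cpu(irq_stack_union, smp_processor_id());
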
@@ -40,7 +40,7 @@ struct thread_info {
/* thread information allocation */
-#define THREAD_SIZE_ORDER 2
+#define THREAD_SIZE_ORDER 2 /* keep value 2 for 16k, use 3 for 32k */
/* Be sure to hunt all references to this down when you change the size of
* the kernel stack */
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
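
The sizing arithmetic, as a worked example assuming the common 4k PAGE_SIZE:

/*
 * THREAD_SIZE = PAGE_SIZE << THREAD_SIZE_ORDER
 *             = 4096 << 2 = 16384	(16k, an order-2 allocation)
 * and with THREAD_SIZE_ORDER == 3:
 *             = 4096 << 3 = 32768	(32k, order-3)
 *
 * IRQ_STACK_SIZE in <asm/processor.h> is (4096 << 2) as well, i.e. the
 * IRQ stack is deliberately kept the same 16k as the task stack.
 */
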
@@ -1997,6 +1997,28 @@ ftrace_stub:
ENDPROC(return_to_handler)
#endif /* CONFIG_FUNCTION_TRACER */
+/* void call_on_stack(unsigned long param1, void *func, unsigned long new_stack) */
+ENTRY(call_on_stack)
+	STREG %sp, 8(%arg2)	/* save old stack pointer on the new stack */
+	STREG %rp, 16(%arg2)	/* save old return pointer on the new stack */
+
+	/* HPPA calling convention for function pointers */
+#ifdef CONFIG_64BIT
+	LDREG 16(%arg1), %arg1	/* 64-bit: func is a function descriptor, entry point at offset 16 */
+	bve,l (%arg1), %rp	/* call func, return address lands in %rp */
+	addi 0x40, %arg2, %sp	/* (delay slot) sp = new stack + 64-byte frame */
+#else
+	addi 0x40, %arg2, %sp	/* sp = new stack + 64-byte frame */
+	be,l 0(%sr4,%arg1), %sr0, %r31	/* call func, return address lands in %r31 */
+	copy %r31, %rp		/* (delay slot) move return address into %rp */
+#endif
+
+	addi -0x40, %sp, %sp	/* back to the base of the new stack */
+	LDREG 16(%sp),%rp	/* reload the saved return pointer */
+	bv (%rp)		/* return to the original caller */
+	LDREG 8(%sp),%sp	/* (delay slot) restore the old stack pointer */
+ENDPROC(call_on_stack)
+
get_register:
/*
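
In C terms, call_on_stack() behaves roughly like the pseudocode below. It is pseudocode only: sp and rp stand for the %sp and %rp machine registers, and a real stack switch cannot be expressed in portable C. It does show the frame layout the assembly relies on, with the old sp saved at byte offset 8 and the old rp at byte offset 16 of the new stack, and a standard 64-byte (0x40) frame reserved before the call:

/* pseudocode: sp/rp denote the machine registers, not C variables */
void call_on_stack(unsigned long param1, void *func,
		   unsigned long new_stack)
{
	void (*fn)(unsigned long) = func;

	*(unsigned long *)(new_stack +  8) = sp;	/* save old sp */
	*(unsigned long *)(new_stack + 16) = rp;	/* save old rp */

	sp = new_stack + 0x40;		/* minimal first frame */
	fn(param1);			/* handler runs on the new stack */

	/* the callee restored sp to new_stack + 0x40 on return */
	rp = *(unsigned long *)(new_stack + 16);	/* reload old rp */
	sp = *(unsigned long *)(new_stack +  8);	/* restore old sp */
}
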
@@ -330,6 +330,59 @@ static inline int eirr_to_irq(unsigned long eirr)
return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
+
+int sysctl_panic_on_stackoverflow __read_mostly;
+
+/*
+ * Stack overflow check:
+ */
+static inline void stack_overflow_check(unsigned long sp, unsigned long stack_start,
+ unsigned long stack_size, const char *stackname)
+{
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+#define STACK_MARGIN 128
+ if (likely((sp - stack_start) < (stack_size - STACK_MARGIN)))
+ return;
+
+	WARN(1, "stackcheck: %s has overflowed the %s stack (sp:%lx, stk bottom-top:%lx-%lx)\n",
+			current->comm, stackname, sp,
+			stack_start, stack_start + stack_size);
+
+ if (sysctl_panic_on_stackoverflow)
+ panic("low stack detected by irq handler - check messages\n");
+#endif
+}
+
+extern void call_on_stack(unsigned long param1, void *func, unsigned long new_stack); /* in entry.S */
+
+static noinline void execute_on_irq_stack(void *func, unsigned long param1)
+{
+ int cpu = smp_processor_id();
+ unsigned long sp, irq_stack;
+ void (*direct_call)(unsigned long param1) = func;
+
+ irq_stack = (unsigned long) &per_cpu(irq_stack_union, cpu);
+ get_current_sp(sp);
+
+	/*
+	 * This is where we switch to the IRQ stack. However, if we are
+	 * already running on the IRQ stack (because we interrupted a
+	 * hardirq handler), we cannot switch again and simply keep
+	 * using the current stack, which is the IRQ stack already.
+	 */
+
+ if ((sp - irq_stack) >= IRQ_STACK_SIZE) {
+ stack_overflow_check(sp, (unsigned long)task_stack_page(current), THREAD_SIZE, "kernel");
+ call_on_stack(param1, func, irq_stack);
+		/* WARN_ON_ONCE(1);  enable to check that the irq stack is used */
+		/* TODO: check if backtrace works from the irq stack */
+		/* TODO: use the get_current_sp() macro in other code as well */
+ } else {
+ stack_overflow_check(sp, irq_stack, IRQ_STACK_SIZE, "irq");
+ direct_call(param1);
+ }
+}
+
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
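
The (sp - irq_stack) >= IRQ_STACK_SIZE test above exploits unsigned wraparound: when sp is below irq_stack the subtraction wraps to a huge value, so a single compare answers "is sp outside [irq_stack, irq_stack + IRQ_STACK_SIZE)" in both directions. A standalone userspace demonstration of the idiom (not kernel code):

#include <assert.h>

#define IRQ_STACK_SIZE (4096 << 2)

static int off_irq_stack(unsigned long sp, unsigned long base)
{
	/* wraps for sp < base; exceeds the size for sp >= base + size */
	return (sp - base) >= IRQ_STACK_SIZE;
}

int main(void)
{
	unsigned long base = 0x100000;

	assert(off_irq_stack(base - 8, base));			/* below */
	assert(!off_irq_stack(base + 64, base));		/* inside */
	assert(off_irq_stack(base + IRQ_STACK_SIZE, base));	/* above */
	return 0;
}
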
@@ -364,7 +418,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
goto set_out;
}
#endif
- generic_handle_irq(irq);
+ execute_on_irq_stack(&generic_handle_irq, irq);
out:
irq_exit();
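
One wrinkle: generic_handle_irq() is declared as int generic_handle_irq(unsigned int irq), but execute_on_irq_stack() calls it through a void (*)(unsigned long) pointer. That works with the parisc calling convention, although it is formally undefined behaviour in C. A type-safe variant would route the call through a small wrapper; handle_irq_on_stack() is an invented name, shown only as a sketch:

static void handle_irq_on_stack(unsigned long irq)
{
	generic_handle_irq(irq);	/* narrows back to unsigned int */
}

	/* in do_cpu_irq_mask(): */
	execute_on_irq_stack(&handle_irq_on_stack, irq);
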
@@ -423,3 +477,24 @@ void __init init_IRQ(void)
}
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union);
+
+asmlinkage void do_softirq(void)
+{
+ __u32 pending;
+ unsigned long flags;
+
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+
+ pending = local_softirq_pending();
+
+ /* Switch to interrupt stack */
+ if (pending) {
+ execute_on_irq_stack(&__do_softirq, 0);
+ WARN_ON_ONCE(softirq_count());
+ }
+ local_irq_restore(flags);
+}
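
For context on when this runs: with __ARCH_HAS_DO_SOFTIRQ defined in <asm/irq.h> above, the generic kernel invokes this do_softirq() from the usual softirq kick points, most visibly local_bh_enable(). A rough sketch of that caller side, assuming the generic code of this kernel generation (not part of this patch):

void local_bh_enable(void)
{
	/* ...preempt-count bookkeeping elided... */
	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();	/* the parisc version above, which runs
				 * __do_softirq() on the per-cpu IRQ stack */
	/* ... */
}

Hardirq exit takes its own path: do_cpu_irq_mask() already runs generic_handle_irq() on the IRQ stack, and irq_exit() kicks any pending softirqs itself.
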