Message ID | 20220919084533.42318-1-liaochang1@huawei.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | kprobes: Mark ftrace mcount handler functions nokprobe | expand |
On Mon, 19 Sep 2022 16:45:33 +0800 Liao Chang <liaochang1@huawei.com> wrote: > Mark ftrace mcount handler functions nokprobe since probing on these > functions probably reaches mcount recursivly during kprobe breakpoint > handler for some architecture(tested for riscv, arm64), and reenter > kprobe is treated as a fatal error, causes kernel panic. This looks to me that the affected archs should be made more robust for this case than to add this to the generic code. -- Steve > > Pesudo code below demonstrate this problem: > > mcount > function_trace_call (probed) > arch_breakpoint_handler > arch_setup_singlestep [mcount] > function_trace_call (probed) > arch_breakpoint_handler > reenter_kprobe > BUG > > Signed-off-by: Liao Chang <liaochang1@huawei.com> > --- > kernel/trace/trace_functions.c | 5 +++++ > 1 file changed, 5 insertions(+) > > diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c > index 9f1bfbe105e8..440a678a8c7c 100644 > --- a/kernel/trace/trace_functions.c > +++ b/kernel/trace/trace_functions.c > @@ -16,6 +16,7 @@ > #include <linux/ftrace.h> > #include <linux/slab.h> > #include <linux/fs.h> > +#include <linux/kprobes.h> > > #include "trace.h" > > @@ -194,6 +195,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, > > ftrace_test_recursion_unlock(bit); > } > +NOKPROBE_SYMBOL(function_trace_call); > > #ifdef CONFIG_UNWINDER_ORC > /* > @@ -245,6 +247,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, > atomic_dec(&data->disabled); > local_irq_restore(flags); > } > +NOKPROBE_SYMBOL(function_stack_trace_call); > > static inline bool is_repeat_check(struct trace_array *tr, > struct trace_func_repeats *last_info, > @@ -321,6 +324,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, > out: > ftrace_test_recursion_unlock(bit); > } > +NOKPROBE_SYMBOL(function_no_repeats_trace_call); > > static void > function_stack_no_repeats_trace_call(unsigned long ip, unsigned long 
parent_ip, > @@ -363,6 +367,7 @@ function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, > atomic_dec(&data->disabled); > local_irq_restore(flags); > } > +NOKPROBE_SYMBOL(function_stack_no_repeats_trace_call); > > static struct tracer_opt func_opts[] = { > #ifdef CONFIG_STACKTRACE
在 2022/9/27 2:37, Steven Rostedt 写道: > On Mon, 19 Sep 2022 16:45:33 +0800 > Liao Chang <liaochang1@huawei.com> wrote: > >> Mark ftrace mcount handler functions nokprobe since probing on these >> functions probably reaches mcount recursivly during kprobe breakpoint >> handler for some architecture(tested for riscv, arm64), and reenter >> kprobe is treated as a fatal error, causes kernel panic. > > This looks to me that the affected archs should be made more robust for this > case than to add this to the generic code. OK, i will fix this problem in arch related code, thanks for feedback. > > -- Steve > > >> >> Pesudo code below demonstrate this problem: >> >> mcount >> function_trace_call (probed) >> arch_breakpoint_handler >> arch_setup_singlestep [mcount] >> function_trace_call (probed) >> arch_breakpoint_handler >> reenter_kprobe >> BUG >> >> Signed-off-by: Liao Chang <liaochang1@huawei.com> >> --- >> kernel/trace/trace_functions.c | 5 +++++ >> 1 file changed, 5 insertions(+) >> >> diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c >> index 9f1bfbe105e8..440a678a8c7c 100644 >> --- a/kernel/trace/trace_functions.c >> +++ b/kernel/trace/trace_functions.c >> @@ -16,6 +16,7 @@ >> #include <linux/ftrace.h> >> #include <linux/slab.h> >> #include <linux/fs.h> >> +#include <linux/kprobes.h> >> >> #include "trace.h" >> >> @@ -194,6 +195,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, >> >> ftrace_test_recursion_unlock(bit); >> } >> +NOKPROBE_SYMBOL(function_trace_call); >> >> #ifdef CONFIG_UNWINDER_ORC >> /* >> @@ -245,6 +247,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, >> atomic_dec(&data->disabled); >> local_irq_restore(flags); >> } >> +NOKPROBE_SYMBOL(function_stack_trace_call); >> >> static inline bool is_repeat_check(struct trace_array *tr, >> struct trace_func_repeats *last_info, >> @@ -321,6 +324,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, >> out: >> 
ftrace_test_recursion_unlock(bit); >> } >> +NOKPROBE_SYMBOL(function_no_repeats_trace_call); >> >> static void >> function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, >> @@ -363,6 +367,7 @@ function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, >> atomic_dec(&data->disabled); >> local_irq_restore(flags); >> } >> +NOKPROBE_SYMBOL(function_stack_no_repeats_trace_call); >> >> static struct tracer_opt func_opts[] = { >> #ifdef CONFIG_STACKTRACE > > > .
On Mon, 26 Sep 2022 14:37:23 -0400 Steven Rostedt <rostedt@goodmis.org> wrote: > On Mon, 19 Sep 2022 16:45:33 +0800 > Liao Chang <liaochang1@huawei.com> wrote: > > > Mark ftrace mcount handler functions nokprobe since probing on these > > functions probably reaches mcount recursivly during kprobe breakpoint > > handler for some architecture(tested for riscv, arm64), and reenter > > kprobe is treated as a fatal error, causes kernel panic. > > This looks to me that the affected archs should be made more robust for this > case than to add this to the generic code. Yeah, kprobes (arch specific code) itself shouldn't be traced by ftrace usually. Thank you, > > -- Steve > > > > > > Pesudo code below demonstrate this problem: > > > > mcount > > function_trace_call (probed) > > arch_breakpoint_handler > > arch_setup_singlestep [mcount] > > function_trace_call (probed) > > arch_breakpoint_handler > > reenter_kprobe > > BUG > > > > Signed-off-by: Liao Chang <liaochang1@huawei.com> > > --- > > kernel/trace/trace_functions.c | 5 +++++ > > 1 file changed, 5 insertions(+) > > > > diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c > > index 9f1bfbe105e8..440a678a8c7c 100644 > > --- a/kernel/trace/trace_functions.c > > +++ b/kernel/trace/trace_functions.c > > @@ -16,6 +16,7 @@ > > #include <linux/ftrace.h> > > #include <linux/slab.h> > > #include <linux/fs.h> > > +#include <linux/kprobes.h> > > > > #include "trace.h" > > > > @@ -194,6 +195,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, > > > > ftrace_test_recursion_unlock(bit); > > } > > +NOKPROBE_SYMBOL(function_trace_call); > > > > #ifdef CONFIG_UNWINDER_ORC > > /* > > @@ -245,6 +247,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, > > atomic_dec(&data->disabled); > > local_irq_restore(flags); > > } > > +NOKPROBE_SYMBOL(function_stack_trace_call); > > > > static inline bool is_repeat_check(struct trace_array *tr, > > struct trace_func_repeats *last_info, > > 
@@ -321,6 +324,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, > > out: > > ftrace_test_recursion_unlock(bit); > > } > > +NOKPROBE_SYMBOL(function_no_repeats_trace_call); > > > > static void > > function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, > > @@ -363,6 +367,7 @@ function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, > > atomic_dec(&data->disabled); > > local_irq_restore(flags); > > } > > +NOKPROBE_SYMBOL(function_stack_no_repeats_trace_call); > > > > static struct tracer_opt func_opts[] = { > > #ifdef CONFIG_STACKTRACE >
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 9f1bfbe105e8..440a678a8c7c 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -16,6 +16,7 @@ #include <linux/ftrace.h> #include <linux/slab.h> #include <linux/fs.h> +#include <linux/kprobes.h> #include "trace.h" @@ -194,6 +195,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, ftrace_test_recursion_unlock(bit); } +NOKPROBE_SYMBOL(function_trace_call); #ifdef CONFIG_UNWINDER_ORC /* @@ -245,6 +247,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, atomic_dec(&data->disabled); local_irq_restore(flags); } +NOKPROBE_SYMBOL(function_stack_trace_call); static inline bool is_repeat_check(struct trace_array *tr, struct trace_func_repeats *last_info, @@ -321,6 +324,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, out: ftrace_test_recursion_unlock(bit); } +NOKPROBE_SYMBOL(function_no_repeats_trace_call); static void function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, @@ -363,6 +367,7 @@ function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, atomic_dec(&data->disabled); local_irq_restore(flags); } +NOKPROBE_SYMBOL(function_stack_no_repeats_trace_call); static struct tracer_opt func_opts[] = { #ifdef CONFIG_STACKTRACE
Mark ftrace mcount handler functions nokprobe since probing on these functions may reach mcount recursively during the kprobe breakpoint handler on some architectures (tested on riscv, arm64), and re-entering a kprobe is treated as a fatal error, causing a kernel panic. Pseudo code below demonstrates this problem: mcount function_trace_call (probed) arch_breakpoint_handler arch_setup_singlestep [mcount] function_trace_call (probed) arch_breakpoint_handler reenter_kprobe BUG Signed-off-by: Liao Chang <liaochang1@huawei.com> --- kernel/trace/trace_functions.c | 5 +++++ 1 file changed, 5 insertions(+)