Message ID | 20190418084253.903603121@linutronix.de |
---|---|
State | New, archived |
Series | stacktrace: Consolidate stack trace usage |
On 4/18/19 11:41 AM, Thomas Gleixner wrote:
> Replace the indirection through struct stack_trace by using the storage
> array based interfaces.
>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Acked-by: Dmitry Vyukov <dvyukov@google.com>
> Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
> Cc: Alexander Potapenko <glider@google.com>
> Cc: kasan-dev@googlegroups.com
> Cc: linux-mm@kvack.org

Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>

>  static inline depot_stack_handle_t save_stack(gfp_t flags)
>  {
>  	unsigned long entries[KASAN_STACK_DEPTH];
> -	struct stack_trace trace = {
> -		.nr_entries = 0,
> -		.entries = entries,
> -		.max_entries = KASAN_STACK_DEPTH,
> -		.skip = 0
> -	};
> +	unsigned int nr_entries;
>
> -	save_stack_trace(&trace);
> -	filter_irq_stacks(&trace);
> -
> -	return depot_save_stack(&trace, flags);
> +	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
> +	nr_entries = filter_irq_stacks(entries, nr_entries);
> +	return stack_depot_save(entries, nr_entries, flags);

Suggestion for further improvement:

stack_trace_save() shouldn't unwind beyond the irq entry point, so we
wouldn't need filter_irq_stacks(). Probably none of the call sites care
about the random stack above the irq entry point, so it doesn't make
sense to spend resources on unwinding the non-irq stack from an
interrupt first and filtering it out later. It would improve the
performance of stack_trace_save() called from interrupts and fix
page_owner, which feeds the unfiltered stack to stack_depot_save().
The random non-irq part kills the benefit of using stack_depot_save().
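To see the trimming in isolation, here is a minimal user-space sketch of the array-based filtering being discussed; in_irqentry_text(), the addresses and STACK_DEPTH are mocked up for illustration, and only the trimming loop mirrors the filter_irq_stacks() shown in the quoted hunk:

/*
 * Minimal user-space sketch of the array-based filtering discussed
 * above.  in_irqentry_text() and the addresses are mocked up; only
 * the trimming loop mirrors the kernel's filter_irq_stacks().
 */
#include <stdio.h>

#define STACK_DEPTH 8

/* Stand-in for the kernel's irq entry section check. */
static int in_irqentry_text(unsigned long ptr)
{
	return ptr >= 0x1000 && ptr < 0x2000;
}

/* Trim the trace at the first entry inside the irq entry code. */
static unsigned int filter_irq_stacks(unsigned long *entries,
				      unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i]))
			return i + 1;	/* keep the irqentry function */
	}
	return nr_entries;
}

int main(void)
{
	/* Fake trace: handler frame, irq entry frame, then the
	 * interrupted task's random frames. */
	unsigned long entries[STACK_DEPTH] = {
		0x4000, 0x1234, 0x9000, 0x9100, 0x9200
	};
	unsigned int nr_entries = 5;

	nr_entries = filter_irq_stacks(entries, nr_entries);
	printf("kept %u entries\n", nr_entries);	/* prints: kept 2 entries */
	return 0;
}

Everything below the first irq-entry frame (the interrupted task's random frames) is dropped, which is what lets identical interrupt stacks deduplicate in the stack depot; that is the property the page_owner remark above is about.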
On Thu, 18 Apr 2019, Andrey Ryabinin wrote:
> On 4/18/19 11:41 AM, Thomas Gleixner wrote:
> > Replace the indirection through struct stack_trace by using the storage
> > array based interfaces.
> >
> > Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> > Acked-by: Dmitry Vyukov <dvyukov@google.com>
> > Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
> > Cc: Alexander Potapenko <glider@google.com>
> > Cc: kasan-dev@googlegroups.com
> > Cc: linux-mm@kvack.org
>
> Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
>
> >  static inline depot_stack_handle_t save_stack(gfp_t flags)
> >  {
> >  	unsigned long entries[KASAN_STACK_DEPTH];
> > -	struct stack_trace trace = {
> > -		.nr_entries = 0,
> > -		.entries = entries,
> > -		.max_entries = KASAN_STACK_DEPTH,
> > -		.skip = 0
> > -	};
> > +	unsigned int nr_entries;
> >
> > -	save_stack_trace(&trace);
> > -	filter_irq_stacks(&trace);
> > -
> > -	return depot_save_stack(&trace, flags);
> > +	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
> > +	nr_entries = filter_irq_stacks(entries, nr_entries);
> > +	return stack_depot_save(entries, nr_entries, flags);
>
> Suggestion for further improvement:
>
> stack_trace_save() shouldn't unwind beyond the irq entry point, so we
> wouldn't need filter_irq_stacks(). Probably none of the call sites care
> about the random stack above the irq entry point, so it doesn't make
> sense to spend resources on unwinding the non-irq stack from an
> interrupt first and filtering it out later.

There are users which care about the full trace. Once we have cleaned up
the whole architecture side, we can add core side filtering which allows
us to:

  1) replace the 'skip' number of entries at the beginning

  2) stop the trace when it reaches a certain point

Right now, I don't want to change any of this until the whole mess is
consolidated.

Thanks,

	tglx
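Both kinds of core-side filtering mentioned here reduce to simple operations on the flat entry array. The helpers below are purely hypothetical illustrations of that idea (trace_skip() and trace_stop_at() are not kernel interfaces), written as a standalone user-space sketch:

/*
 * Hypothetical illustration of core-side filtering on a flat entry
 * array: skipping leading entries and stopping at a given end point.
 * These helpers do not exist in the kernel; they only sketch the idea.
 */
#include <stdio.h>
#include <string.h>

/* 1) Drop 'skipnr' entries from the start of the trace. */
static unsigned int trace_skip(unsigned long *entries,
			       unsigned int nr_entries,
			       unsigned int skipnr)
{
	if (skipnr >= nr_entries)
		return 0;
	memmove(entries, entries + skipnr,
		(nr_entries - skipnr) * sizeof(*entries));
	return nr_entries - skipnr;
}

/* 2) Cut the trace once a given end point is reached. */
static unsigned int trace_stop_at(const unsigned long *entries,
				  unsigned int nr_entries,
				  unsigned long end)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (entries[i] == end)
			return i + 1;	/* include the end point itself */
	}
	return nr_entries;
}

int main(void)
{
	unsigned long entries[] = { 0xa0, 0xa1, 0xb0, 0xc0, 0xd0 };
	unsigned int nr = 5;

	nr = trace_skip(entries, nr, 2);	/* drop 0xa0, 0xa1 */
	nr = trace_stop_at(entries, nr, 0xc0);	/* keep up to 0xc0 */
	printf("%u entries left\n", nr);	/* prints: 2 entries left */
	return 0;
}

Doing this once in the core, rather than in every caller, is the consolidation the reply above defers until the architecture side is cleaned up.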
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -48,34 +48,28 @@ static inline int in_irqentry_text(unsig
 		 ptr < (unsigned long)&__softirqentry_text_end);
 }
 
-static inline void filter_irq_stacks(struct stack_trace *trace)
+static inline unsigned int filter_irq_stacks(unsigned long *entries,
+					     unsigned int nr_entries)
 {
-	int i;
+	unsigned int i;
 
-	if (!trace->nr_entries)
-		return;
-	for (i = 0; i < trace->nr_entries; i++)
-		if (in_irqentry_text(trace->entries[i])) {
+	for (i = 0; i < nr_entries; i++) {
+		if (in_irqentry_text(entries[i])) {
 			/* Include the irqentry function into the stack. */
-			trace->nr_entries = i + 1;
-			break;
+			return i + 1;
 		}
+	}
+	return nr_entries;
 }
 
 static inline depot_stack_handle_t save_stack(gfp_t flags)
 {
 	unsigned long entries[KASAN_STACK_DEPTH];
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.entries = entries,
-		.max_entries = KASAN_STACK_DEPTH,
-		.skip = 0
-	};
+	unsigned int nr_entries;
 
-	save_stack_trace(&trace);
-	filter_irq_stacks(&trace);
-
-	return depot_save_stack(&trace, flags);
+	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+	nr_entries = filter_irq_stacks(entries, nr_entries);
+	return stack_depot_save(entries, nr_entries, flags);
 }
 
 static inline void set_track(struct kasan_track *track, gfp_t flags)
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -100,10 +100,11 @@ static void print_track(struct kasan_tra
 {
 	pr_err("%s by task %u:\n", prefix, track->pid);
 	if (track->stack) {
-		struct stack_trace trace;
+		unsigned long *entries;
+		unsigned int nr_entries;
 
-		depot_fetch_stack(track->stack, &trace);
-		print_stack_trace(&trace, 0);
+		nr_entries = stack_depot_fetch(track->stack, &entries);
+		stack_trace_print(entries, nr_entries, 0);
	} else {
 		pr_err("(stack is not available)\n");
 	}
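Taken together, the two hunks show the whole life cycle of a depot-backed trace under the new interfaces: capture into a local array, store it in the depot, and later fetch and print it by handle. The condensed sketch below is assembled from exactly the calls that appear in the diff; the wrapper names record_stack()/show_stack_by_handle() and the DEPTH constant are illustrative, not kernel symbols:

/*
 * Condensed view of the call flow in the hunks above, assembled from
 * the calls shown in the diff.  Wrapper names and DEPTH are
 * illustrative only.
 */
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/gfp.h>

#define DEPTH 64

/* Capture the current stack and store it in the stack depot. */
static depot_stack_handle_t record_stack(gfp_t flags)
{
	unsigned long entries[DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, flags);
}

/* Fetch a previously stored trace by handle and print it. */
static void show_stack_by_handle(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr_entries, 0);
}

The caller never touches struct stack_trace; the entry array and its length are passed around directly, which is the whole point of the consolidation series.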