--- a/arch/riscv/include/asm/stacktrace.h
+++ b/arch/riscv/include/asm/stacktrace.h
@@ -11,8 +11,6 @@ struct stackframe {
unsigned long ra;
};
-extern void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
- bool (*fn)(void *, unsigned long), void *arg);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
const char *loglvl);
--- a/arch/riscv/kernel/perf_callchain.c
+++ b/arch/riscv/kernel/perf_callchain.c
@@ -74,5 +74,5 @@ static bool fill_callchain(void *entry, unsigned long pc)
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- walk_stackframe(NULL, regs, fill_callchain, entry);
+ arch_stack_walk(fill_callchain, entry, NULL, regs);
}
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -16,8 +16,9 @@
#ifdef CONFIG_FRAME_POINTER
-void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
- bool (*fn)(void *, unsigned long), void *arg)
+noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task,
+ struct pt_regs *regs)
{
unsigned long fp, sp, pc;
int level = 0;
@@ -29,7 +30,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
} else if (task == NULL || task == current) {
fp = (unsigned long)__builtin_frame_address(0);
sp = current_stack_pointer;
- pc = (unsigned long)walk_stackframe;
+ pc = (unsigned long)arch_stack_walk;
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
@@ -41,7 +42,8 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
unsigned long low, high;
struct stackframe *frame;
- if (unlikely(!__kernel_text_address(pc) || (level++ >= 1 && !fn(arg, pc))))
+ if (unlikely(!__kernel_text_address(pc) ||
+ (level++ >= 1 && !consume_entry(cookie, pc))))
break;
/* Validate frame pointer */
@@ -66,8 +68,9 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
#else /* !CONFIG_FRAME_POINTER */
-void notrace walk_stackframe(struct task_struct *task,
- struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
+noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task,
+ struct pt_regs *regs)
{
unsigned long sp, pc;
unsigned long *ksp;
@@ -77,7 +80,7 @@ void notrace walk_stackframe(struct task_struct *task,
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
sp = current_stack_pointer;
- pc = (unsigned long)walk_stackframe;
+ pc = (unsigned long)arch_stack_walk;
} else {
/* task blocked in __switch_to */
sp = task->thread.sp;
@@ -89,7 +92,7 @@ void notrace walk_stackframe(struct task_struct *task,
ksp = (unsigned long *)sp;
while (!kstack_end(ksp)) {
- if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
+ if (__kernel_text_address(pc) && unlikely(!consume_entry(cookie, pc)))
break;
pc = (*ksp++) - 0x4;
}
@@ -108,7 +111,7 @@ static bool print_trace_address(void *arg, unsigned long pc)
noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
const char *loglvl)
{
- walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
+ arch_stack_walk(print_trace_address, (void *)loglvl, task, regs);
}
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
@@ -133,13 +136,7 @@ unsigned long __get_wchan(struct task_struct *task)
if (!try_get_task_stack(task))
return 0;
- walk_stackframe(task, NULL, save_wchan, &pc);
+ arch_stack_walk(save_wchan, &pc, task, NULL);
put_task_stack(task);
return pc;
}
-
-noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
- struct task_struct *task, struct pt_regs *regs)
-{
- walk_stackframe(task, regs, consume_entry, cookie);
-}
walk_stackframe can be replaced entirely by arch_stack_walk, since
walk_stackframe is only called by arch_stack_walk and the two differ
only in their argument order.

Signed-off-by: Chen Zhongjin <chenzhongjin@huawei.com>
---
 arch/riscv/include/asm/stacktrace.h |  2 --
 arch/riscv/kernel/perf_callchain.c  |  2 +-
 arch/riscv/kernel/stacktrace.c      | 29 +++++++++++++----------------
 3 files changed, 14 insertions(+), 19 deletions(-)
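
For reference, a minimal sketch of the argument mapping this patch relies on. The two prototypes are taken from the hunks above; the forward declarations of struct task_struct and struct pt_regs are stubs so the sketch stands alone outside the kernel tree, and the consumer typedef matches the one declared in include/linux/stacktrace.h:

/* Stub declarations, for illustration only outside the kernel tree. */
#include <stdbool.h>

struct task_struct;
struct pt_regs;

/* Generic consumer type, as declared in include/linux/stacktrace.h. */
typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr);

/* Old, riscv-private walker (removed by this patch). */
void walk_stackframe(struct task_struct *task, struct pt_regs *regs,
		     bool (*fn)(void *, unsigned long), void *arg);

/* Generic entry point that now contains the walker body directly. */
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs);

/*
 * The removed wrapper was a pure argument permutation:
 *
 *	arch_stack_walk(consume_entry, cookie, task, regs)
 *		-> walk_stackframe(task, regs, consume_entry, cookie)
 *
 * so each caller of walk_stackframe(task, regs, fn, arg) becomes
 * arch_stack_walk(fn, arg, task, regs), as in the call-site hunks above.
 */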