@@ -193,6 +193,7 @@ config RISCV
select THREAD_INFO_IN_TASK
select TRACE_IRQFLAGS_SUPPORT
select UACCESS_MEMCPY if !MMU
+ select USER_STACKTRACE_SUPPORT
select ZONE_DMA32 if 64BIT
config CLANG_SUPPORTS_DYNAMIC_FTRACE
--- a/arch/riscv/kernel/perf_callchain.c
+++ b/arch/riscv/kernel/perf_callchain.c
@@ -6,37 +6,9 @@
#include <asm/stacktrace.h>
-/*
- * Get the return address for a single stackframe and return a pointer to the
- * next frame tail.
- */
-static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
- unsigned long fp, unsigned long reg_ra)
+static bool fill_callchain(void *entry, unsigned long pc)
{
- struct stackframe buftail;
- unsigned long ra = 0;
- unsigned long __user *user_frame_tail =
- (unsigned long __user *)(fp - sizeof(struct stackframe));
-
- /* Check accessibility of one struct frame_tail beyond */
- if (!access_ok(user_frame_tail, sizeof(buftail)))
- return 0;
- if (__copy_from_user_inatomic(&buftail, user_frame_tail,
- sizeof(buftail)))
- return 0;
-
- if (reg_ra != 0)
- ra = reg_ra;
- else
- ra = buftail.ra;
-
- fp = buftail.fp;
- if (ra != 0)
- perf_callchain_store(entry, ra);
- else
- return 0;
-
- return fp;
+ return perf_callchain_store(entry, pc) == 0;
}
/*
@@ -56,19 +28,7 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- unsigned long fp = 0;
-
- fp = regs->s0;
- perf_callchain_store(entry, regs->epc);
-
- fp = user_backtrace(entry, fp, regs->ra);
- while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
- fp = user_backtrace(entry, fp, 0);
-}
-
-static bool fill_callchain(void *entry, unsigned long pc)
-{
- return perf_callchain_store(entry, pc) == 0;
+ arch_stack_walk_user(fill_callchain, entry, regs);
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -161,3 +161,46 @@ noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie
{
walk_stackframe(task, regs, consume_entry, cookie);
}
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
+ void *cookie, unsigned long fp,
+ unsigned long reg_ra)
+{
+ struct stackframe buftail;
+ unsigned long ra = 0;
+ unsigned long __user *user_frame_tail =
+ (unsigned long __user *)(fp - sizeof(struct stackframe));
+
+ /* Check accessibility of one struct frame_tail beyond */
+ if (!access_ok(user_frame_tail, sizeof(buftail)))
+ return 0;
+ if (__copy_from_user_inatomic(&buftail, user_frame_tail,
+ sizeof(buftail)))
+ return 0;
+
+ ra = reg_ra ? : buftail.ra;
+
+ fp = buftail.fp;
+	if (!ra || !consume_entry(cookie, ra))
+ return 0;
+
+ return fp;
+}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ unsigned long fp = 0;
+
+ fp = regs->s0;
+ if (!consume_entry(cookie, regs->epc))
+ return;
+
+ fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
+ while (fp && !(fp & 0x3))
+ fp = unwind_user_frame(consume_entry, cookie, fp, 0);
+}
Currently, userstacktrace is unsupported on riscv. Use the
perf_callchain_user() code as a blueprint to implement
arch_stack_walk_user(), which adds userstacktrace support to riscv.
Meanwhile, arch_stack_walk_user() can be used to simplify the
implementation of perf_callchain_user().

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/riscv/Kconfig                 |  1 +
 arch/riscv/kernel/perf_callchain.c | 46 ++----------------------------
 arch/riscv/kernel/stacktrace.c     | 43 ++++++++++++++++++++++++++++
 3 files changed, 47 insertions(+), 43 deletions(-)
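For readers unfamiliar with the frame-pointer walk above, here is a minimal
user-space sketch of the same algorithm (not part of the patch). It assumes
the RISC-V frame record layout of struct stackframe { fp; ra; } stored
immediately below the frame pointer, as in asm/stacktrace.h; the helper
names walk_one_frame() and print_pc() are made up for illustration, and the
kernel's access_ok()/__copy_from_user_inatomic() checks are replaced by a
plain memcpy() from a fake stack buffer.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the two-word frame record used by the RISC-V user unwinder. */
struct stackframe {
	unsigned long fp;
	unsigned long ra;
};

typedef bool (*consume_fn)(void *cookie, unsigned long pc);

/*
 * Mimics unwind_user_frame(): the { fp, ra } pair sits immediately below
 * the frame pointer; stop when ra is 0 or the consumer declines more
 * entries, and otherwise return the caller's frame pointer.
 */
static unsigned long walk_one_frame(consume_fn consume, void *cookie,
				    const unsigned char *stack,
				    unsigned long fp, unsigned long reg_ra)
{
	struct stackframe tail;
	unsigned long ra;

	memcpy(&tail, stack + fp - sizeof(tail), sizeof(tail));

	ra = reg_ra ? reg_ra : tail.ra;
	if (!ra || !consume(cookie, ra))
		return 0;

	return tail.fp;
}

static bool print_pc(void *cookie, unsigned long pc)
{
	(void)cookie;
	printf("pc = 0x%lx\n", pc);
	return true;
}

int main(void)
{
	/* Fake "user stack" with two chained frame records; outermost fp == 0. */
	unsigned char stack[64] = { 0 };
	struct stackframe outer = { .fp = 0,  .ra = 0x1000 };
	struct stackframe inner = { .fp = 48, .ra = 0x2000 };
	unsigned long fp = 32;			/* plays the role of regs->s0 */

	memcpy(stack + 48 - sizeof(outer), &outer, sizeof(outer));
	memcpy(stack + 32 - sizeof(inner), &inner, sizeof(inner));

	print_pc(NULL, 0x3000);			/* plays the role of regs->epc */

	/* The first step uses the live return address, as regs->ra does above. */
	fp = walk_one_frame(print_pc, NULL, stack, fp, 0x2000);
	while (fp && !(fp & 0x3))
		fp = walk_one_frame(print_pc, NULL, stack, fp, 0);

	return 0;
}

Running this prints the three pc values in leaf-to-root order, mirroring
what perf_callchain_user() now records through arch_stack_walk_user().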