@@ -5,6 +5,7 @@
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H
+#include <linux/dwarf.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
@@ -35,6 +36,7 @@ struct stack_info {
* A snapshot of a frame record or fp/lr register values, along with some
* accounting information necessary for robust unwinding.
*
+ * @sp: The sp value (CFA) at the call site of the current function.
* @fp: The fp value in the frame record (or the real fp)
* @pc: The lr value in the frame record (or the real lr)
*
@@ -47,8 +49,11 @@ struct stack_info {
* @prev_type: The type of stack this frame record was on, or a synthetic
* value of STACK_TYPE_UNKNOWN. This is used to detect a
* transition from one stack to another.
+ *
+ * @reliable: Stack trace is reliable.
*/
struct stackframe {
+ unsigned long sp;
unsigned long fp;
unsigned long pc;
DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
@@ -57,6 +62,7 @@ struct stackframe {
#ifdef CONFIG_KRETPROBES
struct llist_node *kr_cur;
#endif
+ bool reliable;
};
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
@@ -5,6 +5,7 @@
* Copyright (C) 2012 ARM Ltd.
*/
#include <linux/kernel.h>
+#include <linux/dwarf.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
@@ -36,8 +37,22 @@
void start_backtrace(struct stackframe *frame, unsigned long fp,
unsigned long pc)
{
+ struct dwarf_rule *rule;
+
+ frame->reliable = true;
frame->fp = fp;
frame->pc = pc;
+ frame->sp = 0;
+ /*
+ * Lookup the dwarf rule for PC. If it exists, initialize the SP
+ * based on the frame pointer passed in.
+ */
+ rule = dwarf_lookup(pc);
+ if (rule)
+ frame->sp = fp - rule->fp_offset;
+ else
+ frame->reliable = false;
+
#ifdef CONFIG_KRETPROBES
frame->kr_cur = NULL;
#endif
@@ -67,6 +82,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
unsigned long fp = frame->fp;
struct stack_info info;
+ struct dwarf_rule *rule;
+ unsigned long lookup_pc;
if (!tsk)
tsk = current;
@@ -137,6 +154,32 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
#endif
+ /*
+ * If it is the last frame, no need to check dwarf.
+ */
+ if (frame->fp == (unsigned long)task_pt_regs(tsk)->stackframe)
+ return 0;
+
+ if (!frame->reliable) {
+ /*
+ * The sp value cannot be reliably computed anymore because a
+ * previous frame was unreliable.
+ */
+ return 0;
+ }
+ lookup_pc = frame->pc;
+
+ rule = dwarf_lookup(lookup_pc);
+ if (!rule) {
+ frame->reliable = false;
+ return 0;
+ }
+
+ frame->sp += rule->sp_offset;
+ if (frame->fp != (frame->sp + rule->fp_offset)) {
+ frame->reliable = false;
+ return 0;
+ }
return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
@@ -242,4 +285,30 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
walk_stackframe(task, &frame, consume_entry, cookie);
}
+noinline int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task)
+{
+ struct stackframe frame;
+ int ret = 0;
+
+ if (task == current) {
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(1),
+ (unsigned long)__builtin_return_address(0));
+ } else {
+ start_backtrace(&frame, thread_saved_fp(task),
+ thread_saved_pc(task));
+ }
+
+ while (!ret) {
+ if (!frame.reliable)
+ return -EINVAL;
+ if (!consume_entry(cookie, frame.pc))
+ return -EINVAL;
+ ret = unwind_frame(task, &frame);
+ }
+
+ return ret == -ENOENT ? 0 : -EINVAL;
+}
+
#endif