@@ -40,6 +40,12 @@
* value.
*
* @task: The task being unwound.
+ *
+ * @final_fp: Address of the final frame record; the unwind terminates here.
+ *
+ * @done: Unwind completed successfully.
+ *
+ * @failed: Unwind failed.
*/
struct unwind_state {
unsigned long fp;
@@ -51,6 +57,9 @@ struct unwind_state {
struct llist_node *kr_cur;
#endif
struct task_struct *task;
+ unsigned long final_fp;
+ bool done;
+ bool failed;
};
static void unwind_init_common(struct unwind_state *state,
@@ -73,6 +82,26 @@ static void unwind_init_common(struct unwind_state *state,
bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
state->prev_fp = 0;
state->prev_type = STACK_TYPE_UNKNOWN;
+ state->done = false;
+ state->failed = false;
+
+ /* Stack trace terminates here. */
+ state->final_fp = (unsigned long)task_pt_regs(task)->stackframe;
+}
+
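+/*
+ * The final frame is the frame record embedded in the task's pt_regs;
+ * reaching it means the stack trace terminated cleanly rather than being
+ * aborted on a bad frame.
+ */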
+static inline bool unwind_final_frame(struct unwind_state *state)
+{
+ return state->fp == state->final_fp;
+}
+
+static inline bool unwind_done(struct unwind_state *state)
+{
+ return state->done;
+}
+
+static inline bool unwind_failed(struct unwind_state *state)
+{
+ return state->failed;
}
/*
@@ -133,24 +162,31 @@ static inline void unwind_init_from_task(struct unwind_state *state,
* records (e.g. a cycle), determined based on the location and fp value of A
* and the location (but not the fp value) of B.
*/
-static int notrace unwind_next(struct unwind_state *state)
+static void notrace unwind_next(struct unwind_state *state)
{
struct task_struct *tsk = state->task;
unsigned long fp = state->fp;
struct stack_info info;
- /* Final frame; nothing to unwind */
- if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
- return -ENOENT;
+ if (unwind_final_frame(state)) {
+ state->done = true;
+ return;
+ }
- if (fp & 0x7)
- return -EINVAL;
+ if (fp & 0x7) {
+ state->failed = true;
+ return;
+ }
- if (!on_accessible_stack(tsk, fp, 16, &info))
- return -EINVAL;
+ if (!on_accessible_stack(tsk, fp, 16, &info)) {
+ state->failed = true;
+ return;
+ }
- if (test_bit(info.type, state->stacks_done))
- return -EINVAL;
+ if (test_bit(info.type, state->stacks_done)) {
+ state->failed = true;
+ return;
+ }
/*
* As stacks grow downward, any valid record on the same stack must be
@@ -166,8 +202,10 @@ static int notrace unwind_next(struct unwind_state *state)
* stack.
*/
if (info.type == state->prev_type) {
- if (fp <= state->prev_fp)
- return -EINVAL;
+ if (fp <= state->prev_fp) {
+ state->failed = true;
+ return;
+ }
} else {
__set_bit(state->prev_type, state->stacks_done);
}
@@ -195,8 +233,10 @@ static int notrace unwind_next(struct unwind_state *state)
*/
orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
(void *)state->fp);
- if (WARN_ON_ONCE(state->pc == orig_pc))
- return -EINVAL;
+ if (WARN_ON_ONCE(state->pc == orig_pc)) {
+ state->failed = true;
+ return;
+ }
state->pc = orig_pc;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -204,26 +244,9 @@ static int notrace unwind_next(struct unwind_state *state)
if (is_kretprobe_trampoline(state->pc))
state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif
-
- return 0;
}
NOKPROBE_SYMBOL(unwind_next);
-static void notrace unwind(struct unwind_state *state,
- stack_trace_consume_fn consume_entry, void *cookie)
-{
- while (1) {
- int ret;
-
- if (!consume_entry(cookie, state->pc))
- break;
- ret = unwind_next(state);
- if (ret < 0)
- break;
- }
-}
-NOKPROBE_SYMBOL(unwind);
-
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
char *loglvl = arg;
@@ -257,21 +280,39 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
barrier();
}
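+/*
+ * Pick the right initializer for the unwind: from the provided regs, from
+ * the caller's frame when unwinding the current task, or from the other
+ * task's saved state otherwise.
+ */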
+static __always_inline void unwind_start(struct unwind_state *state,
+ struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if (regs)
+ unwind_init_from_regs(state, regs);
+ else if (task == current)
+ unwind_init_from_caller(state);
+ else
+ unwind_init_from_task(state, task);
+}
+
noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
{
struct unwind_state state;
- if (regs) {
- if (task != current)
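+	/* Unwinding another task from regs is not supported. */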
+ if (regs && task != current)
+ return;
+
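+	/*
+	 * Each iteration consumes the current PC and then steps to the next
+	 * frame record. Reaching the final frame ends the walk; a failed step
+	 * aborts it without handing the consumer a suspect PC.
+	 */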
+ for (unwind_start(&state, task, regs); !unwind_done(&state);
+ unwind_next(&state)) {
+ if (unwind_failed(&state)) {
+ /* PC is suspect. Cannot consume it. */
return;
- unwind_init_from_regs(&state, regs);
- } else if (task == current) {
- unwind_init_from_caller(&state);
- } else {
- unwind_init_from_task(&state, task);
- }
+ }
- unwind(&state, consume_entry, cookie);
+ if (!consume_entry(cookie, state.pc)) {
+			/* The consumer terminated the unwind. */
+ return;
+ }
+ }
}