
[2/2] arm64: stacktrace: factor out kunwind_stack_walk()

Message ID 20231124110511.2795958-3-mark.rutland@arm.com (mailing list archive)
State New, archived
Series arm64: stacktrace: add kunwind_stack_walk()

Commit Message

Mark Rutland Nov. 24, 2023, 11:05 a.m. UTC
Currently arm64 uses the generic arch_stack_walk() interface for all
stack walking code. This only passes a PC value and cookie to the unwind
callback, whereas we'd like to pass some additional information in some
cases. For example, the BPF exception unwinder wants the FP; for
reliable stacktraces we'll want to perform additional checks on other
portions of unwind state, and we'd like to expand the information
printed by dump_backtrace() to include provenance and reliability
information.

As preparation for all of the above, this patch factors the core unwind
logic out of arch_stack_walk() and into a new kunwind_stack_walk()
function which provides all of the unwind state to a callback function.
The existing arch_stack_walk() interface is implemented atop this.

The kunwind_stack_walk() function is intended to be a private
implementation detail of unwinders in stacktrace.c, and not something to
be exported generally to kernel code. It is __always_inline'd into its
caller so that neither it nor its caller appears in stacktraces (which is
the existing/required behavior for arch_stack_walk() and friends) and so
that the compiler can optimize away some of the indirection.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Puranjay Mohan <puranjay12@gmail.com>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/kernel/stacktrace.c | 39 ++++++++++++++++++++++++++++------
 1 file changed, 33 insertions(+), 6 deletions(-)
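
To illustrate the intended use of the new interface, the sketch below shows a hypothetical in-tree consumer written against the callback signature this patch introduces (bool fn(const struct kunwind_state *state, void *cookie)). It is not part of the patch: the function names are made up, and the state->common.fp access assumes the unwind state exposes the frame pointer alongside state->common.pc, which is the kind of extra information the BPF exception unwinder mentioned above would want.

/*
 * Hypothetical consumer sketch, not part of this patch. It assumes
 * struct kunwind_state exposes the frame pointer as common.fp next
 * to common.pc, and that this code lives in stacktrace.c alongside
 * kunwind_stack_walk().
 */
struct fp_search_data {
	unsigned long target_fp;	/* frame pointer we are looking for */
	bool found;
};

static bool fp_search_consume(const struct kunwind_state *state, void *cookie)
{
	struct fp_search_data *data = cookie;

	if (state->common.fp == data->target_fp) {
		data->found = true;
		return false;		/* stop the walk */
	}

	return true;			/* keep unwinding */
}

/* Walk the current task's stack looking for a given frame pointer. */
static bool fp_search_current(unsigned long fp)
{
	struct fp_search_data data = { .target_fp = fp };

	kunwind_stack_walk(fp_search_consume, &data, current, NULL);

	return data.found;
}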

Comments

Kalesh Singh Nov. 27, 2023, 6:37 p.m. UTC | #1
On Fri, Nov 24, 2023 at 3:05 AM Mark Rutland <mark.rutland@arm.com> wrote:
>
> Currently arm64 uses the generic arch_stack_walk() interface for all
> stack walking code. This only passes a PC value and cookie to the unwind
> callback, whereas we'd like to pass some additional information in some
> cases. For example, the BPF exception unwinder wants the FP; for
> reliable stacktraces we'll want to perform additional checks on other
> portions of unwind state, and we'd like to expand the information
> printed by dump_backtrace() to include provenance and reliability
> information.
>
> As preparation for all of the above, this patch factors the core unwind
> logic out of arch_stack_walk() and into a new kunwind_stack_walk()
> function which provides all of the unwind state to a callback function.
> The existing arch_stack_walk() interface is implemented atop this.
>
> The kunwind_stack_walk() function is intended to be a private
> implementation detail of unwinders in stacktrace.c, and not something to
> be exported generally to kernel code. It is __always_inline'd into its
> caller so that neither it nor its caller appears in stacktraces (which is
> the existing/required behavior for arch_stack_walk() and friends) and so
> that the compiler can optimize away some of the indirection.
>
> There should be no functional change as a result of this patch.

Reviewed-by: Kalesh Singh <kaleshsingh@google.com>

Thanks,
Kalesh

>
> Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Kalesh Singh <kaleshsingh@google.com>
> Cc: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
> Cc: Mark Brown <broonie@kernel.org>
> Cc: Puranjay Mohan <puranjay12@gmail.com>
> Cc: Will Deacon <will@kernel.org>
> ---
>  arch/arm64/kernel/stacktrace.c | 39 ++++++++++++++++++++++++++++------
>  1 file changed, 33 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
> index aafc89192787a..7f88028a00c02 100644
> --- a/arch/arm64/kernel/stacktrace.c
> +++ b/arch/arm64/kernel/stacktrace.c
> @@ -154,8 +154,10 @@ kunwind_next(struct kunwind_state *state)
>         return kunwind_recover_return_address(state);
>  }
>
> +typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);
> +
>  static __always_inline void
> -do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
> +do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
>            void *cookie)
>  {
>         if (kunwind_recover_return_address(state))
> @@ -164,7 +166,7 @@ do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
>         while (1) {
>                 int ret;
>
> -               if (!consume_entry(cookie, state->common.pc))
> +               if (!consume_state(state, cookie))
>                         break;
>                 ret = kunwind_next(state);
>                 if (ret < 0)
> @@ -201,9 +203,10 @@ do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
>                         : stackinfo_get_unknown();              \
>         })
>
> -noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
> -                             void *cookie, struct task_struct *task,
> -                             struct pt_regs *regs)
> +static __always_inline void
> +kunwind_stack_walk(kunwind_consume_fn consume_state,
> +                  void *cookie, struct task_struct *task,
> +                  struct pt_regs *regs)
>  {
>         struct stack_info stacks[] = {
>                 stackinfo_get_task(task),
> @@ -236,7 +239,31 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
>                 kunwind_init_from_task(&state, task);
>         }
>
> -       do_kunwind(&state, consume_entry, cookie);
> +       do_kunwind(&state, consume_state, cookie);
> +}
> +
> +struct kunwind_consume_entry_data {
> +       stack_trace_consume_fn consume_entry;
> +       void *cookie;
> +};
> +
> +static bool
> +arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
> +{
> +       struct kunwind_consume_entry_data *data = cookie;
> +       return data->consume_entry(data->cookie, state->common.pc);
> +}
> +
> +noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
> +                             void *cookie, struct task_struct *task,
> +                             struct pt_regs *regs)
> +{
> +       struct kunwind_consume_entry_data data = {
> +               .consume_entry = consume_entry,
> +               .cookie = cookie,
> +       };
> +
> +       kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
>  }
>
>  static bool dump_backtrace_entry(void *arg, unsigned long where)
> --
> 2.30.2
>
Puranjay Mohan Nov. 28, 2023, 9:17 a.m. UTC | #2
Mark Rutland <mark.rutland@arm.com> writes:

> Currently arm64 uses the generic arch_stack_walk() interface for all
> stack walking code. This only passes a PC value and cookie to the unwind
> callback, whereas we'd like to pass some additional information in some
> cases. For example, the BPF exception unwinder wants the FP; for
> reliable stacktraces we'll want to perform additional checks on other
> portions of unwind state, and we'd like to expand the information
> printed by dump_backtrace() to include provenance and reliability
> information.
>
> As preparation for all of the above, this patch factors the core unwind
> logic out of arch_stack_walk() and into a new kunwind_stack_walk()
> function which provides all of the unwind state to a callback function.
> The existing arch_stack_walk() interface is implemented atop this.
>
> The kunwind_stack_walk() function is intended to be a private
> implementation detail of unwinders in stacktrace.c, and not something to
> be exported generally to kernel code. It is __always_inline'd into its
> caller so that neither it nor its caller appears in stacktraces (which is
> the existing/required behavior for arch_stack_walk() and friends) and so
> that the compiler can optimize away some of the indirection.
>
> There should be no functional change as a result of this patch.
>

Thanks for helping with this.

Reviewed-by: Puranjay Mohan <puranjay12@gmail.com>

Thanks,
Puranjay

> Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Kalesh Singh <kaleshsingh@google.com>
> Cc: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
> Cc: Mark Brown <broonie@kernel.org>
> Cc: Puranjay Mohan <puranjay12@gmail.com>
> Cc: Will Deacon <will@kernel.org>
> ---
>  arch/arm64/kernel/stacktrace.c | 39 ++++++++++++++++++++++++++++------
>  1 file changed, 33 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
> index aafc89192787a..7f88028a00c02 100644
> --- a/arch/arm64/kernel/stacktrace.c
> +++ b/arch/arm64/kernel/stacktrace.c
> @@ -154,8 +154,10 @@ kunwind_next(struct kunwind_state *state)
>  	return kunwind_recover_return_address(state);
>  }
>  
> +typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);
> +
>  static __always_inline void
> -do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
> +do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
>  	   void *cookie)
>  {
>  	if (kunwind_recover_return_address(state))
> @@ -164,7 +166,7 @@ do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
>  	while (1) {
>  		int ret;
>  
> -		if (!consume_entry(cookie, state->common.pc))
> +		if (!consume_state(state, cookie))
>  			break;
>  		ret = kunwind_next(state);
>  		if (ret < 0)
> @@ -201,9 +203,10 @@ do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
>  			: stackinfo_get_unknown();		\
>  	})
>  
> -noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
> -			      void *cookie, struct task_struct *task,
> -			      struct pt_regs *regs)
> +static __always_inline void
> +kunwind_stack_walk(kunwind_consume_fn consume_state,
> +		   void *cookie, struct task_struct *task,
> +		   struct pt_regs *regs)
>  {
>  	struct stack_info stacks[] = {
>  		stackinfo_get_task(task),
> @@ -236,7 +239,31 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
>  		kunwind_init_from_task(&state, task);
>  	}
>  
> -	do_kunwind(&state, consume_entry, cookie);
> +	do_kunwind(&state, consume_state, cookie);
> +}
> +
> +struct kunwind_consume_entry_data {
> +	stack_trace_consume_fn consume_entry;
> +	void *cookie;
> +};
> +
> +static bool
> +arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
> +{
> +	struct kunwind_consume_entry_data *data = cookie;
> +	return data->consume_entry(data->cookie, state->common.pc);
> +}
> +
> +noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
> +			      void *cookie, struct task_struct *task,
> +			      struct pt_regs *regs)
> +{
> +	struct kunwind_consume_entry_data data = {
> +		.consume_entry = consume_entry,
> +		.cookie = cookie,
> +	};
> +
> +	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
>  }
>  
>  static bool dump_backtrace_entry(void *arg, unsigned long where)
> -- 
> 2.30.2

Patch

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index aafc89192787a..7f88028a00c02 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -154,8 +154,10 @@  kunwind_next(struct kunwind_state *state)
 	return kunwind_recover_return_address(state);
 }
 
+typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);
+
 static __always_inline void
-do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
+do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
 	   void *cookie)
 {
 	if (kunwind_recover_return_address(state))
@@ -164,7 +166,7 @@  do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
 	while (1) {
 		int ret;
 
-		if (!consume_entry(cookie, state->common.pc))
+		if (!consume_state(state, cookie))
 			break;
 		ret = kunwind_next(state);
 		if (ret < 0)
@@ -201,9 +203,10 @@  do_kunwind(struct kunwind_state *state, stack_trace_consume_fn consume_entry,
 			: stackinfo_get_unknown();		\
 	})
 
-noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
-			      void *cookie, struct task_struct *task,
-			      struct pt_regs *regs)
+static __always_inline void
+kunwind_stack_walk(kunwind_consume_fn consume_state,
+		   void *cookie, struct task_struct *task,
+		   struct pt_regs *regs)
 {
 	struct stack_info stacks[] = {
 		stackinfo_get_task(task),
@@ -236,7 +239,31 @@  noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
 		kunwind_init_from_task(&state, task);
 	}
 
-	do_kunwind(&state, consume_entry, cookie);
+	do_kunwind(&state, consume_state, cookie);
+}
+
+struct kunwind_consume_entry_data {
+	stack_trace_consume_fn consume_entry;
+	void *cookie;
+};
+
+static bool
+arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
+{
+	struct kunwind_consume_entry_data *data = cookie;
+	return data->consume_entry(data->cookie, state->common.pc);
+}
+
+noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
+			      void *cookie, struct task_struct *task,
+			      struct pt_regs *regs)
+{
+	struct kunwind_consume_entry_data data = {
+		.consume_entry = consume_entry,
+		.cookie = cookie,
+	};
+
+	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
 }
 
 static bool dump_backtrace_entry(void *arg, unsigned long where)
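
For readers outside the kernel tree, the shape of this patch is a conventional adapter pattern: the rich-state walker becomes the single implementation, and the old PC-only interface packs its narrow callback and cookie into a small struct that it hands to the rich walker as its cookie. Below is a minimal, self-contained userspace C sketch of that pattern under illustrative names; none of the identifiers come from the kernel.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Rich per-frame state: the analogue of struct kunwind_state. */
struct walk_state {
	unsigned long pc;
	unsigned long fp;
};

/* Rich callback, analogous to kunwind_consume_fn: sees the whole state. */
typedef bool (*rich_consume_fn)(const struct walk_state *state, void *cookie);

/* Narrow callback, analogous to stack_trace_consume_fn: sees only the PC. */
typedef bool (*pc_consume_fn)(void *cookie, unsigned long pc);

/* The single walker implementation, driving the rich callback. */
static void rich_walk(rich_consume_fn consume, void *cookie)
{
	/* Stand-in frame records; a real unwinder would follow frame pointers. */
	struct walk_state frames[] = {
		{ 0x1000, 0xffff0 },
		{ 0x2000, 0xffff8 },
	};

	for (size_t i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
		if (!consume(&frames[i], cookie))
			break;
	}
}

/* Adapter data: bundles the narrow callback with its own cookie. */
struct pc_adapter {
	pc_consume_fn consume;
	void *cookie;
};

/* Adapter callback: forwards only the PC to the narrow callback. */
static bool pc_adapter_consume(const struct walk_state *state, void *cookie)
{
	struct pc_adapter *a = cookie;

	return a->consume(a->cookie, state->pc);
}

/* The legacy PC-only interface, implemented atop the rich walker. */
static void pc_walk(pc_consume_fn consume, void *cookie)
{
	struct pc_adapter a = { .consume = consume, .cookie = cookie };

	rich_walk(pc_adapter_consume, &a);
}

static bool print_pc(void *cookie, unsigned long pc)
{
	(void)cookie;
	printf("pc: %#lx\n", pc);
	return true;
}

int main(void)
{
	pc_walk(print_pc, NULL);
	return 0;
}

The kernel version additionally marks the inner walker __always_inline and the exported arch_stack_walk() noinline noinstr, so that neither the walker nor its adapter shows up in the stacktraces it produces.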