diff mbox series

[4/5] ARM: stacktrace: Make stack walk callback consistent with generic code

Message ID 20220712021527.109921-5-lihuafei1@huawei.com (mailing list archive)
State New, archived
Headers show
Series ARM: Convert to ARCH_STACKWALK | expand

Commit Message

Li Huafei July 12, 2022, 2:15 a.m. UTC
In order to use generic arch_stack_walk() code, make stack walk callback
consistent with it.

Signed-off-by: Li Huafei <lihuafei1@huawei.com>
---
 arch/arm/include/asm/stacktrace.h |  2 +-
 arch/arm/kernel/perf_callchain.c  |  9 ++++-----
 arch/arm/kernel/return_address.c  |  8 ++++----
 arch/arm/kernel/stacktrace.c      | 13 ++++++-------
 4 files changed, 15 insertions(+), 17 deletions(-)

Comments

Mark Brown July 12, 2022, 1:34 p.m. UTC | #1
On Tue, Jul 12, 2022 at 10:15:26AM +0800, Li Huafei wrote:

> In order to use generic arch_stack_walk() code, make stack walk callback
> consistent with it.

It might be useful to say what the changes are here, if nothing else
that makes it easier to review and confirm that the changes are doing
what you intend them to.  See my conversion for arm64 for an example.
The actual changes here seem OK, though.

Reviewed-by: Mark Brown <broonie@kernel.org>
Li Huafei July 13, 2022, 11:21 a.m. UTC | #2
On 2022/7/12 21:34, Mark Brown wrote:
> On Tue, Jul 12, 2022 at 10:15:26AM +0800, Li Huafei wrote:
>
>> In order to use generic arch_stack_walk() code, make stack walk callback
>> consistent with it.
> It might be useful to say what the changes are here, if nothing else
> that makes it easier to review and confirm that the changes are doing
> what you intend them to.  See my conversion for arm64 for an example.


Thanks for the advice. I've looked at the commit:

   baa2cd417053 ("arm64: stacktrace: Make stack walk callback consistent 
with generic code")

The description is very clear. It describes the change in detail and
gives a reason for making that change a separate commit. The same
description applies to the current changes, so I took the commit message
directly, just made the s/arm64/ARM/ changes, and updated to v2:

https://lore.kernel.org/lkml/20220713110020.85511-1-lihuafei1@huawei.com/


> The actual changes here seem OK though
>
> Reviewed-by: Mark Brown <broonie@kernel.org>

Thanks for the review!


Thanks,

Huafei
Linus Walleij July 18, 2022, 9:09 a.m. UTC | #3
On Tue, Jul 12, 2022 at 4:19 AM Li Huafei <lihuafei1@huawei.com> wrote:

> In order to use generic arch_stack_walk() code, make stack walk callback
> consistent with it.
>
> Signed-off-by: Li Huafei <lihuafei1@huawei.com>

Reviewed-by: Linus Walleij <linus.walleij@linaro.org>

Yours,
Linus Walleij
Li Huafei July 26, 2022, 9:19 a.m. UTC | #4
On 2022/7/18 17:09, Linus Walleij wrote:
> On Tue, Jul 12, 2022 at 4:19 AM Li Huafei <lihuafei1@huawei.com> wrote:
> 
>> In order to use generic arch_stack_walk() code, make stack walk callback
>> consistent with it.
>>
>> Signed-off-by: Li Huafei <lihuafei1@huawei.com>
> 
> Reviewed-by: Linus Walleij <linus.walleij@linaro.org>

Thanks!

> 
> Yours,
> Linus Walleij
> .
>
diff mbox series

Patch

diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h
index 25282ff645fb..ce85d3ad5d05 100644
--- a/arch/arm/include/asm/stacktrace.h
+++ b/arch/arm/include/asm/stacktrace.h
@@ -44,7 +44,7 @@  void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
 
 extern int unwind_frame(struct stackframe *frame);
 extern void walk_stackframe(struct stackframe *frame,
-			    int (*fn)(struct stackframe *, void *), void *data);
+			    bool (*fn)(void *, unsigned long), void *data);
 extern void dump_mem(const char *lvl, const char *str, unsigned long bottom,
 		     unsigned long top);
 
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
index bc6b246ab55e..7147edbe56c6 100644
--- a/arch/arm/kernel/perf_callchain.c
+++ b/arch/arm/kernel/perf_callchain.c
@@ -81,13 +81,12 @@  perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
  * whist unwinding the stackframe and is like a subroutine return so we use
  * the PC.
  */
-static int
-callchain_trace(struct stackframe *fr,
-		void *data)
+static bool
+callchain_trace(void *data, unsigned long pc)
 {
 	struct perf_callchain_entry_ctx *entry = data;
-	perf_callchain_store(entry, fr->pc);
-	return 0;
+	perf_callchain_store(entry, pc);
+	return true;
 }
 
 void
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 38f1ea9c724d..ac15db66df4c 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -16,17 +16,17 @@  struct return_address_data {
 	void *addr;
 };
 
-static int save_return_addr(struct stackframe *frame, void *d)
+static bool save_return_addr(void *d, unsigned long pc)
 {
 	struct return_address_data *data = d;
 
 	if (!data->level) {
-		data->addr = (void *)frame->pc;
+		data->addr = (void *)pc;
 
-		return 1;
+		return false;
 	} else {
 		--data->level;
-		return 0;
+		return true;
 	}
 }
 
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 836420c00938..ec0ca3192775 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -121,12 +121,12 @@  int notrace unwind_frame(struct stackframe *frame)
 #endif
 
 void notrace walk_stackframe(struct stackframe *frame,
-		     int (*fn)(struct stackframe *, void *), void *data)
+		     bool (*fn)(void *, unsigned long), void *data)
 {
 	while (1) {
 		int ret;
 
-		if (fn(frame, data))
+		if (!fn(data, frame->pc))
 			break;
 		ret = unwind_frame(frame);
 		if (ret < 0)
@@ -142,21 +142,20 @@  struct stack_trace_data {
 	unsigned int skip;
 };
 
-static int save_trace(struct stackframe *frame, void *d)
+static bool save_trace(void *d, unsigned long addr)
 {
 	struct stack_trace_data *data = d;
 	struct stack_trace *trace = data->trace;
-	unsigned long addr = frame->pc;
 
 	if (data->no_sched_functions && in_sched_functions(addr))
-		return 0;
+		return true;
 	if (data->skip) {
 		data->skip--;
-		return 0;
+		return true;
 	}
 
 	trace->entries[trace->nr_entries++] = addr;
-	return trace->nr_entries >= trace->max_entries;
+	return trace->nr_entries < trace->max_entries;
 }
 
 /* This must be noinline to so that our skip calculation works correctly */