diff mbox series

[bpf-next,1/5] arm64: ftrace: Add ftrace direct call support

Message ID 20220413054959.1053668-2-xukuohai@huawei.com (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series bpf trampoline for arm64 | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR fail PR summary
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 2 maintainers not CCed: mingo@redhat.com catalin.marinas@arm.com
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning WARNING: please, no spaces at the start of a line
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-1 fail Logs for Kernel LATEST on ubuntu-latest + selftests
bpf/vmtest-bpf-next-VM_Test-2 fail Logs for Kernel LATEST on z15 + selftests

Commit Message

Xu Kuohai April 13, 2022, 5:49 a.m. UTC
Add ftrace direct support for arm64.

1. When there is only a custom trampoline, replace the fentry nop with a
   jump instruction that jumps directly to the custom trampoline.

2. When the ftrace trampoline and a custom trampoline coexist, jump from
   fentry to the ftrace trampoline first, then jump to the custom
   trampoline when the ftrace trampoline exits. The currently unused
   register pt_regs->orig_x0 is used as an intermediary for jumping from
   the ftrace trampoline to the custom trampoline (a sketch of this flow
   follows the list).
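
For context, a minimal, hedged sketch of how the generic ftrace core is
expected to drive this arch hook in case 2: the callback name
call_direct_funcs() and the helper ftrace_find_rec_direct() are taken from
the generic kernel/trace/ftrace.c of this era, not from this patch, so
treat the exact signatures as an assumption.

	/* Hedged sketch, not part of this patch (assumes <linux/ftrace.h>).
	 * Generic ftrace_ops callback for direct calls: look up the custom
	 * trampoline registered for this ip and stash it in pt_regs so the
	 * arch return path can branch to it.
	 */
	static void call_direct_funcs(unsigned long ip, unsigned long pip,
				      struct ftrace_ops *ops,
				      struct ftrace_regs *fregs)
	{
		struct pt_regs *regs = ftrace_get_regs(fregs);
		unsigned long addr;

		addr = ftrace_find_rec_direct(ip); /* custom trampoline for ip */
		if (!addr)
			return;

		/* On arm64 this stores addr in regs->orig_x0 (see below). */
		arch_ftrace_set_direct_caller(regs, addr);
	}

With orig_x0 set, the ftrace_common_return path in entry-ftrace.S returns
into the custom trampoline via x10, with x9 holding the parent ip and lr
holding the traced function's own ip, as the comments in that hunk describe.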

Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
---
 arch/arm64/Kconfig               |  2 ++
 arch/arm64/include/asm/ftrace.h  | 10 ++++++++++
 arch/arm64/kernel/asm-offsets.c  |  1 +
 arch/arm64/kernel/entry-ftrace.S | 18 +++++++++++++++---
 4 files changed, 28 insertions(+), 3 deletions(-)

Comments

Song Liu April 14, 2022, 5:58 a.m. UTC | #1
> On Apr 12, 2022, at 10:49 PM, Xu Kuohai <xukuohai@huawei.com> wrote:
> 
> Add ftrace direct support for arm64.
> 
> 1. When there is only a custom trampoline, replace the fentry nop with a
>   jump instruction that jumps directly to the custom trampoline.
> 
> 2. When the ftrace trampoline and a custom trampoline coexist, jump from
>   fentry to the ftrace trampoline first, then jump to the custom
>   trampoline when the ftrace trampoline exits. The currently unused
>   register pt_regs->orig_x0 is used as an intermediary for jumping from
>   the ftrace trampoline to the custom trampoline.
> 
> Signed-off-by: Xu Kuohai <xukuohai@huawei.com>

For the series: 

Acked-by: Song Liu <songliubraving@fb.com>

(Pardon my laziness, I somehow only got 1/5 and 3/5 in one of my 
inboxes, and nothing in my other inbox. :( ) 

Just one nitpick for 2/5: as we move is_valid_bpf_tramp_flags to
trampoline.c, we should change the multi-line comment into net
style:

	/* BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
	 * and it must be used alone.
	 */

Thanks,
Song

Xu Kuohai April 14, 2022, 6:35 a.m. UTC | #2
On 4/14/2022 1:58 PM, Song Liu wrote:
> Just one nitpick for 2/5: as we move is_valid_bpf_tramp_flags to
> trampoline.c, we should change the multi-line comment into net
> style:

Thanks for your suggestion, will fix in v2.
diff mbox series

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 57c4c995965f..81cc330daafc 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -177,6 +177,8 @@  config ARM64
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS \
 		if $(cc-option,-fpatchable-function-entry=2)
+	select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS \
+		if DYNAMIC_FTRACE_WITH_REGS
 	select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
 		if DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 1494cfa8639b..3a363d6a3bd0 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -78,6 +78,16 @@  static inline unsigned long ftrace_call_adjust(unsigned long addr)
 	return addr;
 }
 
+static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
+						 unsigned long addr)
+{
+	/*
+	 * Place custom trampoline address in regs->orig_x0 to let ftrace
+	 * trampoline jump to it.
+	 */
+	regs->orig_x0 = addr;
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 struct dyn_ftrace;
 int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 1197e7679882..b1ed0bf01c59 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -80,6 +80,7 @@  int main(void)
   DEFINE(S_SDEI_TTBR1,		offsetof(struct pt_regs, sdei_ttbr1));
   DEFINE(S_PMR_SAVE,		offsetof(struct pt_regs, pmr_save));
   DEFINE(S_STACKFRAME,		offsetof(struct pt_regs, stackframe));
+  DEFINE(S_ORIG_X0,		offsetof(struct pt_regs, orig_x0));
   DEFINE(PT_REGS_SIZE,		sizeof(struct pt_regs));
   BLANK();
 #ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index e535480a4069..b1bd6576f205 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -60,6 +60,9 @@ 
 	str	x29, [sp, #S_FP]
 	.endif
 
+	/* Set orig_x0 to zero  */
+	str     xzr, [sp, #S_ORIG_X0]
+
 	/* Save the callsite's SP and LR */
 	add	x10, sp, #(PT_REGS_SIZE + 16)
 	stp	x9, x10, [sp, #S_LR]
@@ -119,12 +122,21 @@  ftrace_common_return:
 	/* Restore the callsite's FP, LR, PC */
 	ldr	x29, [sp, #S_FP]
 	ldr	x30, [sp, #S_LR]
-	ldr	x9, [sp, #S_PC]
-
+	ldr	x10, [sp, #S_PC]
+
+	ldr	x11, [sp, #S_ORIG_X0]
+	cbz	x11, 1f
+	/* Set x9 to parent ip before jump to bpf trampoline */
+	mov	x9,  x30
+	/* Set lr to self ip */
+	ldr	x30, [sp, #S_PC]
+	/* Set x10 (used for return address) to bpf trampoline */
+	mov	x10, x11
+1:
 	/* Restore the callsite's SP */
 	add	sp, sp, #PT_REGS_SIZE + 16
 
-	ret	x9
+	ret	x10
 SYM_CODE_END(ftrace_common)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER