
[RFC,bpf-next,v3,1/2] bpf, x64: Fix tailcall infinite loop

Message ID: 20230825145216.56660-2-hffilwlqm@gmail.com (mailing list archive)
State: Superseded
Delegated to: BPF
Series: bpf, x64: Fix tailcall infinite loop

Checks

Context Check Description
bpf/vmtest-bpf-next-VM_Test-0 success Logs for ShellCheck
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-5 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-1 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-7 pending Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for veristat
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 2829 this patch: 2829
netdev/cc_maintainers warning 16 maintainers not CCed: tglx@linutronix.de hpa@zytor.com dsahern@kernel.org mingo@redhat.com kpsingh@kernel.org x86@kernel.org john.fastabend@gmail.com sdf@google.com netdev@vger.kernel.org martin.lau@linux.dev yonghong.song@linux.dev dave.hansen@linux.intel.com davem@davemloft.net jolsa@kernel.org haoluo@google.com bp@alien8.de
netdev/build_clang success Errors and warnings before: 1526 this patch: 1526
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 2857 this patch: 2857
netdev/checkpatch warning WARNING: line length of 82 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline fail Was 0 now: 1
bpf/vmtest-bpf-next-VM_Test-15 fail Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 fail Logs for test_progs on s390x with gcc

Commit Message

Leon Hwang Aug. 25, 2023, 2:52 p.m. UTC
Since commit ebf7d1f508a73871 ("bpf, x64: rework pro/epilogue and tailcall
handling in JIT"), tailcalls on x64 work better than before.

Since commit e411901c0b775a3a ("bpf: allow for tailcalls in BPF subprograms
for x64 JIT"), tailcalls can run in BPF subprograms on x64.

Since commit 5b92a28aae4dd0f8 ("bpf: Support attaching tracing BPF program
to other BPF programs"), a BPF program can trace other BPF programs.

How about combining them all together?

1. FENTRY/FEXIT on a BPF subprogram.
2. A tailcall runs in the BPF subprogram.
3. The tailcall calls itself.

As a result, a tailcall infinite loop comes up, and the loop halts the
machine.

In tail call context, the tail_call_cnt propagates between BPF subprograms
via the stack and the rax register. So do the same in trampolines.

Signed-off-by: Leon Hwang <hffilwlqm@gmail.com>
---
 arch/x86/net/bpf_jit_comp.c | 32 ++++++++++++++++++++++++++------
 include/linux/bpf.h         |  5 +++++
 kernel/bpf/trampoline.c     |  4 ++--
 kernel/bpf/verifier.c       | 30 +++++++++++++++++++++++-------
 4 files changed, 56 insertions(+), 15 deletions(-)
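
To make the scenario concrete, here is a minimal sketch of such a program
layout in BPF C. It is illustrative only: the map, function and section
names are assumptions, and the series' actual reproducer is the selftest
in patch 2/2.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

/* Subprogram reached from tailcall_prog(). User space populates
 * jmp_table[0] with tailcall_prog(), so this tail call re-enters
 * tailcall_prog() itself.
 */
static __noinline
int tailcall_subprog(struct __sk_buff *skb)
{
	bpf_tail_call_static(skb, &jmp_table, 0);
	return 0;
}

SEC("tc")
int tailcall_prog(struct __sk_buff *skb)
{
	return tailcall_subprog(skb);
}

char _license[] SEC("license") = "GPL";

With an FENTRY program attached to tailcall_subprog(), every iteration of
the tail call passes through the fentry trampoline. If the trampoline does
not cache and restore tail_call_cnt, the count carried in rax is lost, the
MAX_TAIL_CALL_CNT check never sees the real count, and the loop can run
forever.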

Comments

Fijalkowski, Maciej Aug. 25, 2023, 5:58 p.m. UTC | #1
On Fri, Aug 25, 2023 at 10:52:15PM +0800, Leon Hwang wrote:
> Since commit ebf7d1f508a73871 ("bpf, x64: rework pro/epilogue and tailcall
> handling in JIT"), tailcalls on x64 work better than before.
> 
> Since commit e411901c0b775a3a ("bpf: allow for tailcalls in BPF subprograms
> for x64 JIT"), tailcalls can run in BPF subprograms on x64.
> 
> Since commit 5b92a28aae4dd0f8 ("bpf: Support attaching tracing BPF program
> to other BPF programs"), a BPF program can trace other BPF programs.
> 
> How about combining them all together?
> 
> 1. FENTRY/FEXIT on a BPF subprogram.
> 2. A tailcall runs in the BPF subprogram.
> 3. The tailcall calls itself.

I would be interested in seeing broken asm code TBH :)

> 
> As a result, a tailcall infinite loop comes up, and the loop halts the
> machine.
> 
> In tail call context, the tail_call_cnt propagates between BPF subprograms
> via the stack and the rax register. So do the same in trampolines.
> 
> Signed-off-by: Leon Hwang <hffilwlqm@gmail.com>
> ---
>  arch/x86/net/bpf_jit_comp.c | 32 ++++++++++++++++++++++++++------
>  include/linux/bpf.h         |  5 +++++
>  kernel/bpf/trampoline.c     |  4 ++--
>  kernel/bpf/verifier.c       | 30 +++++++++++++++++++++++-------
>  4 files changed, 56 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> index a5930042139d3..2846c21d75bfa 100644
> --- a/arch/x86/net/bpf_jit_comp.c
> +++ b/arch/x86/net/bpf_jit_comp.c
> @@ -303,8 +303,12 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
>  	prog += X86_PATCH_SIZE;
>  	if (!ebpf_from_cbpf) {
>  		if (tail_call_reachable && !is_subprog)
> +			/* When it's the entry of the whole tailcall context,
> +			 * zeroing rax means initialising tail_call_cnt.
> +			 */
>  			EMIT2(0x31, 0xC0); /* xor eax, eax */
>  		else
> +			/* Keep the same instruction layout. */

While these comments are helpful I have mixed feelings about them residing
in this patch - rule of thumb to me is to keep the fixes as small as
possible.

>  			EMIT2(0x66, 0x90); /* nop2 */
>  	}
>  	EMIT1(0x55);             /* push rbp */
> @@ -1018,6 +1022,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
>  
>  #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
>  
> +/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
> +#define RESTORE_TAIL_CALL_CNT(stack)				\
> +	EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
> +
>  static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
>  		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
>  {
> @@ -1623,9 +1631,7 @@ st:			if (is_imm8(insn->off))
>  
>  			func = (u8 *) __bpf_call_base + imm32;
>  			if (tail_call_reachable) {
> -				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
> -				EMIT3_off32(0x48, 0x8B, 0x85,
> -					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
> +				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
>  				if (!imm32)
>  					return -EINVAL;
>  				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
> @@ -2400,6 +2406,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
>  	 *                     [ ...        ]
>  	 *                     [ stack_arg2 ]
>  	 * RBP - arg_stack_off [ stack_arg1 ]
> +	 * RSP                 [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
>  	 */
>  
>  	/* room for return value of orig_call or fentry prog */
> @@ -2464,6 +2471,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
>  	else
>  		/* sub rsp, stack_size */
>  		EMIT4(0x48, 0x83, 0xEC, stack_size);
> +	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
> +		EMIT1(0x50);		/* push rax */
>  	/* mov QWORD PTR [rbp - rbx_off], rbx */
>  	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
>  
> @@ -2516,9 +2525,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
>  		restore_regs(m, &prog, regs_off);
>  		save_args(m, &prog, arg_stack_off, true);
>  
> +		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
> +			/* Before calling the original function, restore the
> +			 * tail_call_cnt from stack to rax.
> +			 */
> +			RESTORE_TAIL_CALL_CNT(stack_size);
> +
>  		if (flags & BPF_TRAMP_F_ORIG_STACK) {
> -			emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
> -			EMIT2(0xff, 0xd0); /* call *rax */
> +			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
> +			EMIT2(0xff, 0xd3); /* call *rbx */
>  		} else {
>  			/* call original function */
>  			if (emit_rsb_call(&prog, orig_call, prog)) {
> @@ -2569,7 +2584,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
>  			ret = -EINVAL;
>  			goto cleanup;
>  		}
> -	}
> +	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
> +		/* Before running the original function, restore the
> +		 * tail_call_cnt from stack to rax.
> +		 */
> +		RESTORE_TAIL_CALL_CNT(stack_size);
> +
>  	/* restore return value of orig_call or fentry prog back into RAX */
>  	if (save_ret)
>  		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index cfabbcf47bdb8..c8df257ea435d 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -1028,6 +1028,11 @@ struct btf_func_model {
>   */
>  #define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)
>  
> +/* Indicate that current trampoline is in a tail call context. Then, it has to
> + * cache and restore tail_call_cnt to avoid infinite tail call loop.
> + */
> +#define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)
> +
>  /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
>   * bytes on x86.
>   */
> diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
> index 78acf28d48732..16ab5da7161f2 100644
> --- a/kernel/bpf/trampoline.c
> +++ b/kernel/bpf/trampoline.c
> @@ -415,8 +415,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
>  		goto out;
>  	}
>  
> -	/* clear all bits except SHARE_IPMODIFY */
> -	tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;
> +	/* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
> +	tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
>  
>  	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
>  	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 4ccca1f6c9981..6f290bc6f5f19 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -19246,6 +19246,21 @@ static int check_non_sleepable_error_inject(u32 btf_id)
>  	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
>  }
>  
> +static inline int find_subprog_index(const struct bpf_prog *prog,

FWIW please no inlines in source files, but I don't currently follow the
need for that routine.

> +				     u32 btf_id)
> +{
> +	struct bpf_prog_aux *aux = prog->aux;
> +	int i, subprog = -1;
> +
> +	for (i = 0; i < aux->func_info_cnt; i++)
> +		if (aux->func_info[i].type_id == btf_id) {
> +			subprog = i;
> +			break;
> +		}
> +
> +	return subprog;
> +}
> +
>  int bpf_check_attach_target(struct bpf_verifier_log *log,
>  			    const struct bpf_prog *prog,
>  			    const struct bpf_prog *tgt_prog,
> @@ -19254,9 +19269,9 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
>  {
>  	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
>  	const char prefix[] = "btf_trace_";
> -	int ret = 0, subprog = -1, i;
>  	const struct btf_type *t;
>  	bool conservative = true;
> +	int ret = 0, subprog;
>  	const char *tname;
>  	struct btf *btf;
>  	long addr = 0;
> @@ -19291,11 +19306,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
>  			return -EINVAL;
>  		}
>  
> -		for (i = 0; i < aux->func_info_cnt; i++)
> -			if (aux->func_info[i].type_id == btf_id) {
> -				subprog = i;
> -				break;
> -			}
> +		subprog = find_subprog_index(tgt_prog, btf_id);
>  		if (subprog == -1) {
>  			bpf_log(log, "Subprog %s doesn't exist\n", tname);
>  			return -EINVAL;
> @@ -19559,7 +19570,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
>  	struct bpf_attach_target_info tgt_info = {};
>  	u32 btf_id = prog->aux->attach_btf_id;
>  	struct bpf_trampoline *tr;
> -	int ret;
> +	int ret, subprog;
>  	u64 key;
>  
>  	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
> @@ -19629,6 +19640,11 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
>  	if (!tr)
>  		return -ENOMEM;
>  
> +	if (tgt_prog && tgt_prog->aux->tail_call_reachable) {
> +		subprog = find_subprog_index(tgt_prog, btf_id);
> +		tr->flags = subprog > 0 ? BPF_TRAMP_F_TAIL_CALL_CTX : 0;
> +	}

I kinda forgot trampoline internals so please bear with me.
Here you are checking actually...what? That current program is a subprog
of tgt prog? My knee jerk reaction would be to propagate the
BPF_TRAMP_F_TAIL_CALL_CTX based on just tail_call_reachable, but I need
some more time to get my head around it again, sorry :<

> +
>  	prog->aux->dst_trampoline = tr;
>  	return 0;
>  }
> -- 
> 2.41.0
>
Leon Hwang Aug. 26, 2023, 4:03 a.m. UTC | #2
On 2023/8/26 01:58, Maciej Fijalkowski wrote:
> On Fri, Aug 25, 2023 at 10:52:15PM +0800, Leon Hwang wrote:
>> Since commit ebf7d1f508a73871 ("bpf, x64: rework pro/epilogue and tailcall
>> handling in JIT"), tailcalls on x64 work better than before.
>>
>> Since commit e411901c0b775a3a ("bpf: allow for tailcalls in BPF subprograms
>> for x64 JIT"), tailcalls can run in BPF subprograms on x64.
>>
>> Since commit 5b92a28aae4dd0f8 ("bpf: Support attaching tracing BPF program
>> to other BPF programs"), a BPF program can trace other BPF programs.
>>
>> How about combining them all together?
>>
>> 1. FENTRY/FEXIT on a BPF subprogram.
>> 2. A tailcall runs in the BPF subprogram.
>> 3. The tailcall calls itself.
> 
> I would be interested in seeing broken asm code TBH :)
> 
>>
>> As a result, a tailcall infinite loop comes up, and the loop halts the
>> machine.
>>
>> In tail call context, the tail_call_cnt propagates between BPF subprograms
>> via the stack and the rax register. So do the same in trampolines.
>>
>> Signed-off-by: Leon Hwang <hffilwlqm@gmail.com>
>> ---
>>  arch/x86/net/bpf_jit_comp.c | 32 ++++++++++++++++++++++++++------
>>  include/linux/bpf.h         |  5 +++++
>>  kernel/bpf/trampoline.c     |  4 ++--
>>  kernel/bpf/verifier.c       | 30 +++++++++++++++++++++++-------
>>  4 files changed, 56 insertions(+), 15 deletions(-)
>>
>> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
>> index a5930042139d3..2846c21d75bfa 100644
>> --- a/arch/x86/net/bpf_jit_comp.c
>> +++ b/arch/x86/net/bpf_jit_comp.c
>> @@ -303,8 +303,12 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
>>  	prog += X86_PATCH_SIZE;
>>  	if (!ebpf_from_cbpf) {
>>  		if (tail_call_reachable && !is_subprog)
>> +			/* When it's the entry of the whole tailcall context,
>> +			 * zeroing rax means initialising tail_call_cnt.
>> +			 */
>>  			EMIT2(0x31, 0xC0); /* xor eax, eax */
>>  		else
>> +			/* Keep the same instruction layout. */
> 
> While these comments are helpful I have mixed feelings about them residing
> in this patch - rule of thumb to me is to keep the fixes as small as
> possible.

Got it. I'll separate them into another patch.

Thanks for your rule of thumb.

> 
>>  			EMIT2(0x66, 0x90); /* nop2 */
>>  	}
>>  	EMIT1(0x55);             /* push rbp */
>> @@ -1018,6 +1022,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
>>  
>>  #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
>>  
>> +/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
>> +#define RESTORE_TAIL_CALL_CNT(stack)				\
>> +	EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
>> +
>>  static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
>>  		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
>>  {
>> @@ -1623,9 +1631,7 @@ st:			if (is_imm8(insn->off))
>>  
>>  			func = (u8 *) __bpf_call_base + imm32;
>>  			if (tail_call_reachable) {
>> -				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
>> -				EMIT3_off32(0x48, 0x8B, 0x85,
>> -					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
>> +				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
>>  				if (!imm32)
>>  					return -EINVAL;
>>  				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
>> @@ -2400,6 +2406,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
>>  	 *                     [ ...        ]
>>  	 *                     [ stack_arg2 ]
>>  	 * RBP - arg_stack_off [ stack_arg1 ]
>> +	 * RSP                 [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
>>  	 */
>>  
>>  	/* room for return value of orig_call or fentry prog */
>> @@ -2464,6 +2471,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
>>  	else
>>  		/* sub rsp, stack_size */
>>  		EMIT4(0x48, 0x83, 0xEC, stack_size);
>> +	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
>> +		EMIT1(0x50);		/* push rax */
>>  	/* mov QWORD PTR [rbp - rbx_off], rbx */
>>  	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
>>  
>> @@ -2516,9 +2525,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
>>  		restore_regs(m, &prog, regs_off);
>>  		save_args(m, &prog, arg_stack_off, true);
>>  
>> +		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
>> +			/* Before calling the original function, restore the
>> +			 * tail_call_cnt from stack to rax.
>> +			 */
>> +			RESTORE_TAIL_CALL_CNT(stack_size);
>> +
>>  		if (flags & BPF_TRAMP_F_ORIG_STACK) {
>> -			emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
>> -			EMIT2(0xff, 0xd0); /* call *rax */
>> +			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
>> +			EMIT2(0xff, 0xd3); /* call *rbx */
>>  		} else {
>>  			/* call original function */
>>  			if (emit_rsb_call(&prog, orig_call, prog)) {
>> @@ -2569,7 +2584,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
>>  			ret = -EINVAL;
>>  			goto cleanup;
>>  		}
>> -	}
>> +	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
>> +		/* Before running the original function, restore the
>> +		 * tail_call_cnt from stack to rax.
>> +		 */
>> +		RESTORE_TAIL_CALL_CNT(stack_size);
>> +
>>  	/* restore return value of orig_call or fentry prog back into RAX */
>>  	if (save_ret)
>>  		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
>> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
>> index cfabbcf47bdb8..c8df257ea435d 100644
>> --- a/include/linux/bpf.h
>> +++ b/include/linux/bpf.h
>> @@ -1028,6 +1028,11 @@ struct btf_func_model {
>>   */
>>  #define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)
>>  
>> +/* Indicate that current trampoline is in a tail call context. Then, it has to
>> + * cache and restore tail_call_cnt to avoid infinite tail call loop.
>> + */
>> +#define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)
>> +
>>  /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
>>   * bytes on x86.
>>   */
>> diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
>> index 78acf28d48732..16ab5da7161f2 100644
>> --- a/kernel/bpf/trampoline.c
>> +++ b/kernel/bpf/trampoline.c
>> @@ -415,8 +415,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
>>  		goto out;
>>  	}
>>  
>> -	/* clear all bits except SHARE_IPMODIFY */
>> -	tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;
>> +	/* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
>> +	tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
>>  
>>  	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
>>  	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
>> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
>> index 4ccca1f6c9981..6f290bc6f5f19 100644
>> --- a/kernel/bpf/verifier.c
>> +++ b/kernel/bpf/verifier.c
>> @@ -19246,6 +19246,21 @@ static int check_non_sleepable_error_inject(u32 btf_id)
>>  	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
>>  }
>>  
>> +static inline int find_subprog_index(const struct bpf_prog *prog,
> 
> FWIW please no inlines in source files, but I don't currently follow the
> need for that routine.

Got it. It's unnecessary to inline it.

> 
>> +				     u32 btf_id)
>> +{
>> +	struct bpf_prog_aux *aux = prog->aux;
>> +	int i, subprog = -1;
>> +
>> +	for (i = 0; i < aux->func_info_cnt; i++)
>> +		if (aux->func_info[i].type_id == btf_id) {
>> +			subprog = i;
>> +			break;
>> +		}
>> +
>> +	return subprog;
>> +}
>> +
>>  int bpf_check_attach_target(struct bpf_verifier_log *log,
>>  			    const struct bpf_prog *prog,
>>  			    const struct bpf_prog *tgt_prog,
>> @@ -19254,9 +19269,9 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
>>  {
>>  	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
>>  	const char prefix[] = "btf_trace_";
>> -	int ret = 0, subprog = -1, i;
>>  	const struct btf_type *t;
>>  	bool conservative = true;
>> +	int ret = 0, subprog;
>>  	const char *tname;
>>  	struct btf *btf;
>>  	long addr = 0;
>> @@ -19291,11 +19306,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
>>  			return -EINVAL;
>>  		}
>>  
>> -		for (i = 0; i < aux->func_info_cnt; i++)
>> -			if (aux->func_info[i].type_id == btf_id) {
>> -				subprog = i;
>> -				break;
>> -			}
>> +		subprog = find_subprog_index(tgt_prog, btf_id);
>>  		if (subprog == -1) {
>>  			bpf_log(log, "Subprog %s doesn't exist\n", tname);
>>  			return -EINVAL;
>> @@ -19559,7 +19570,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
>>  	struct bpf_attach_target_info tgt_info = {};
>>  	u32 btf_id = prog->aux->attach_btf_id;
>>  	struct bpf_trampoline *tr;
>> -	int ret;
>> +	int ret, subprog;
>>  	u64 key;
>>  
>>  	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
>> @@ -19629,6 +19640,11 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
>>  	if (!tr)
>>  		return -ENOMEM;
>>  
>> +	if (tgt_prog && tgt_prog->aux->tail_call_reachable) {
>> +		subprog = find_subprog_index(tgt_prog, btf_id);
>> +		tr->flags = subprog > 0 ? BPF_TRAMP_F_TAIL_CALL_CTX : 0;
>> +	}
> 
> I kinda forgot trampoline internals so please bear with me.
> Here you are checking actually...what? That current program is a subprog
> of tgt prog? My knee jerk reaction would be to propagate the
> BPF_TRAMP_F_TAIL_CALL_CTX based on just tail_call_reachable, but I need
> some more time to get my head around it again, sorry :<

Yeah, that current program must be a subprog of tgt prog.

For example:

tailcall_subprog() {
  bpf_tail_call_static(&jmp_table, 0);
}

tailcall_prog() {
  tailcall_subprog();
}

prog() {
  bpf_tail_call_static(&jmp_table, 0);
}

jmp_table is populated with tailcall_prog().

When fentry is attached to prog(), there's no tail_call_cnt for fentry
to propagate. As we can see in emit_prologue(), fentry runs before
tail_call_cnt is initialised.

What about fentry on tailcall_prog()? No, fentry on tailcall_prog()
never runs, because the tailcall 'jmp' skips the fentry on
tailcall_prog().

And when fentry is attached to tailcall_subprog(), fentry has to
propagate tail_call_cnt properly.

In conclusion, that current program must be a subprog of tgt prog.

Thanks,
Leon
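
As a sketch of how the pieces in the example above could be wired up from
user space (libbpf; the object handles and all names here are assumptions,
not taken from the series):

#include <bpf/libbpf.h>
#include <bpf/bpf.h>

/* tc_obj: already-loaded object with tailcall_prog()/tailcall_subprog();
 * fentry_obj: opened, not yet loaded, object with an fentry program.
 */
static int setup(struct bpf_object *tc_obj, struct bpf_object *fentry_obj)
{
	struct bpf_program *tc_prog, *fentry_prog;
	int key = 0, map_fd, prog_fd, err;

	/* jmp_table[0] = tailcall_prog(), so the bpf_tail_call_static()
	 * in tailcall_subprog() re-enters tailcall_prog().
	 */
	tc_prog = bpf_object__find_program_by_name(tc_obj, "tailcall_prog");
	map_fd = bpf_object__find_map_fd_by_name(tc_obj, "jmp_table");
	prog_fd = bpf_program__fd(tc_prog);
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (err)
		return err;

	/* Target fentry at tailcall_subprog() inside the loaded
	 * tail-call program, then load and attach it.
	 */
	fentry_prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
	err = bpf_program__set_attach_target(fentry_prog, prog_fd,
					     "tailcall_subprog");
	if (err)
		return err;
	err = bpf_object__load(fentry_obj);
	if (err)
		return err;

	return libbpf_get_error(bpf_program__attach_trace(fentry_prog));
}

This is the attachment the flag is about: the fentry trampoline sits on a
subprog that is tail-call reachable, so it has to carry tail_call_cnt
across the call into the traced subprog.
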
Fijalkowski, Maciej Aug. 30, 2023, 10:49 p.m. UTC | #3
On Sat, Aug 26, 2023 at 12:03:12PM +0800, Leon Hwang wrote:
> 
> 
> On 2023/8/26 01:58, Maciej Fijalkowski wrote:
> > On Fri, Aug 25, 2023 at 10:52:15PM +0800, Leon Hwang wrote:
> >> Since commit ebf7d1f508a73871 ("bpf, x64: rework pro/epilogue and tailcall
> >> handling in JIT"), tailcalls on x64 work better than before.
> >>
> >> Since commit e411901c0b775a3a ("bpf: allow for tailcalls in BPF subprograms
> >> for x64 JIT"), tailcalls can run in BPF subprograms on x64.
> >>
> >> Since commit 5b92a28aae4dd0f8 ("bpf: Support attaching tracing BPF program
> >> to other BPF programs"), a BPF program can trace other BPF programs.
> >>
> >> How about combining them all together?
> >>
> >> 1. FENTRY/FEXIT on a BPF subprogram.
> >> 2. A tailcall runs in the BPF subprogram.
> >> 3. The tailcall calls itself.
> > 
> > I would be interested in seeing broken asm code TBH :)
> > 
> >>
> >> As a result, a tailcall infinite loop comes up, and the loop halts the
> >> machine.
> >>
> >> In tail call context, the tail_call_cnt propagates between BPF subprograms
> >> via the stack and the rax register. So do the same in trampolines.
> >>
> >> Signed-off-by: Leon Hwang <hffilwlqm@gmail.com>
> >> ---
> >>  arch/x86/net/bpf_jit_comp.c | 32 ++++++++++++++++++++++++++------
> >>  include/linux/bpf.h         |  5 +++++
> >>  kernel/bpf/trampoline.c     |  4 ++--
> >>  kernel/bpf/verifier.c       | 30 +++++++++++++++++++++++-------
> >>  4 files changed, 56 insertions(+), 15 deletions(-)
> >>
> >> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> >> index a5930042139d3..2846c21d75bfa 100644
> >> --- a/arch/x86/net/bpf_jit_comp.c
> >> +++ b/arch/x86/net/bpf_jit_comp.c
> >> @@ -303,8 +303,12 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
> >>  	prog += X86_PATCH_SIZE;
> >>  	if (!ebpf_from_cbpf) {
> >>  		if (tail_call_reachable && !is_subprog)
> >> +			/* When it's the entry of the whole tailcall context,
> >> +			 * zeroing rax means initialising tail_call_cnt.
> >> +			 */
> >>  			EMIT2(0x31, 0xC0); /* xor eax, eax */
> >>  		else
> >> +			/* Keep the same instruction layout. */
> > 
> > While these comments are helpful I have mixed feelings about them residing
> > in this patch - rule of thumb to me is to keep the fixes as small as
> > possible.
> 
> Got it. I'll separate them into another patch.
> 
> Thanks for your rule of thumb.
> 
> > 
> >>  			EMIT2(0x66, 0x90); /* nop2 */
> >>  	}
> >>  	EMIT1(0x55);             /* push rbp */
> >> @@ -1018,6 +1022,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
> >>  
> >>  #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
> >>  
> >> +/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
> >> +#define RESTORE_TAIL_CALL_CNT(stack)				\
> >> +	EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
> >> +
> >>  static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
> >>  		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
> >>  {
> >> @@ -1623,9 +1631,7 @@ st:			if (is_imm8(insn->off))
> >>  
> >>  			func = (u8 *) __bpf_call_base + imm32;
> >>  			if (tail_call_reachable) {
> >> -				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
> >> -				EMIT3_off32(0x48, 0x8B, 0x85,
> >> -					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
> >> +				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
> >>  				if (!imm32)
> >>  					return -EINVAL;
> >>  				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
> >> @@ -2400,6 +2406,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
> >>  	 *                     [ ...        ]
> >>  	 *                     [ stack_arg2 ]
> >>  	 * RBP - arg_stack_off [ stack_arg1 ]
> >> +	 * RSP                 [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
> >>  	 */
> >>  
> >>  	/* room for return value of orig_call or fentry prog */
> >> @@ -2464,6 +2471,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
> >>  	else
> >>  		/* sub rsp, stack_size */
> >>  		EMIT4(0x48, 0x83, 0xEC, stack_size);
> >> +	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
> >> +		EMIT1(0x50);		/* push rax */
> >>  	/* mov QWORD PTR [rbp - rbx_off], rbx */
> >>  	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
> >>  
> >> @@ -2516,9 +2525,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
> >>  		restore_regs(m, &prog, regs_off);
> >>  		save_args(m, &prog, arg_stack_off, true);
> >>  
> >> +		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
> >> +			/* Before calling the original function, restore the
> >> +			 * tail_call_cnt from stack to rax.
> >> +			 */
> >> +			RESTORE_TAIL_CALL_CNT(stack_size);
> >> +
> >>  		if (flags & BPF_TRAMP_F_ORIG_STACK) {
> >> -			emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
> >> -			EMIT2(0xff, 0xd0); /* call *rax */
> >> +			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
> >> +			EMIT2(0xff, 0xd3); /* call *rbx */
> >>  		} else {
> >>  			/* call original function */
> >>  			if (emit_rsb_call(&prog, orig_call, prog)) {
> >> @@ -2569,7 +2584,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
> >>  			ret = -EINVAL;
> >>  			goto cleanup;
> >>  		}
> >> -	}
> >> +	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
> >> +		/* Before running the original function, restore the
> >> +		 * tail_call_cnt from stack to rax.
> >> +		 */
> >> +		RESTORE_TAIL_CALL_CNT(stack_size);
> >> +
> >>  	/* restore return value of orig_call or fentry prog back into RAX */
> >>  	if (save_ret)
> >>  		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
> >> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> >> index cfabbcf47bdb8..c8df257ea435d 100644
> >> --- a/include/linux/bpf.h
> >> +++ b/include/linux/bpf.h
> >> @@ -1028,6 +1028,11 @@ struct btf_func_model {
> >>   */
> >>  #define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)
> >>  
> >> +/* Indicate that current trampoline is in a tail call context. Then, it has to
> >> + * cache and restore tail_call_cnt to avoid infinite tail call loop.
> >> + */
> >> +#define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)
> >> +
> >>  /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
> >>   * bytes on x86.
> >>   */
> >> diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
> >> index 78acf28d48732..16ab5da7161f2 100644
> >> --- a/kernel/bpf/trampoline.c
> >> +++ b/kernel/bpf/trampoline.c
> >> @@ -415,8 +415,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
> >>  		goto out;
> >>  	}
> >>  
> >> -	/* clear all bits except SHARE_IPMODIFY */
> >> -	tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;
> >> +	/* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
> >> +	tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
> >>  
> >>  	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
> >>  	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
> >> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> >> index 4ccca1f6c9981..6f290bc6f5f19 100644
> >> --- a/kernel/bpf/verifier.c
> >> +++ b/kernel/bpf/verifier.c
> >> @@ -19246,6 +19246,21 @@ static int check_non_sleepable_error_inject(u32 btf_id)
> >>  	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
> >>  }
> >>  
> >> +static inline int find_subprog_index(const struct bpf_prog *prog,
> > 
> > FWIW please no inlines in source files, but I don't currently follow the
> > need for that routine.
> 
> Got it. It's unnecessary to inline it.
> 
> > 
> >> +				     u32 btf_id)
> >> +{
> >> +	struct bpf_prog_aux *aux = prog->aux;
> >> +	int i, subprog = -1;
> >> +
> >> +	for (i = 0; i < aux->func_info_cnt; i++)
> >> +		if (aux->func_info[i].type_id == btf_id) {
> >> +			subprog = i;
> >> +			break;
> >> +		}
> >> +
> >> +	return subprog;
> >> +}
> >> +
> >>  int bpf_check_attach_target(struct bpf_verifier_log *log,
> >>  			    const struct bpf_prog *prog,
> >>  			    const struct bpf_prog *tgt_prog,
> >> @@ -19254,9 +19269,9 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
> >>  {
> >>  	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
> >>  	const char prefix[] = "btf_trace_";
> >> -	int ret = 0, subprog = -1, i;
> >>  	const struct btf_type *t;
> >>  	bool conservative = true;
> >> +	int ret = 0, subprog;
> >>  	const char *tname;
> >>  	struct btf *btf;
> >>  	long addr = 0;
> >> @@ -19291,11 +19306,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
> >>  			return -EINVAL;
> >>  		}
> >>  
> >> -		for (i = 0; i < aux->func_info_cnt; i++)
> >> -			if (aux->func_info[i].type_id == btf_id) {
> >> -				subprog = i;
> >> -				break;
> >> -			}
> >> +		subprog = find_subprog_index(tgt_prog, btf_id);
> >>  		if (subprog == -1) {
> >>  			bpf_log(log, "Subprog %s doesn't exist\n", tname);
> >>  			return -EINVAL;
> >> @@ -19559,7 +19570,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
> >>  	struct bpf_attach_target_info tgt_info = {};
> >>  	u32 btf_id = prog->aux->attach_btf_id;
> >>  	struct bpf_trampoline *tr;
> >> -	int ret;
> >> +	int ret, subprog;
> >>  	u64 key;
> >>  
> >>  	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
> >> @@ -19629,6 +19640,11 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
> >>  	if (!tr)
> >>  		return -ENOMEM;
> >>  
> >> +	if (tgt_prog && tgt_prog->aux->tail_call_reachable) {
> >> +		subprog = find_subprog_index(tgt_prog, btf_id);
> >> +		tr->flags = subprog > 0 ? BPF_TRAMP_F_TAIL_CALL_CTX : 0;
> >> +	}
> > 
> > I kinda forgot trampoline internals so please bear with me.
> > Here you are checking actually...what? That current program is a subprog
> > of tgt prog? My knee jerk reaction would be to propagate the
> > BPF_TRAMP_F_TAIL_CALL_CTX based on just tail_call_reachable, but I need
> > some more time to get my head around it again, sorry :<
> 
> Yeah, that current program must be a subprog of tgt prog.
> 
> For example:
> 
> tailcall_subprog() {
>   bpf_tail_call_static(&jmp_table, 0);
> }
> 
> tailcall_prog() {
>   tailcall_subprog();
> }
> 
> prog() {
>   bpf_tail_call_static(&jmp_table, 0);
> }
> 
> jmp_table is populated with tailcall_prog().
> 
> When fentry is attached to prog(), there's no tail_call_cnt for fentry
> to propagate. As we can see in emit_prologue(), fentry runs before
> tail_call_cnt is initialised.
> 
> What about fentry on tailcall_prog()? No, fentry on tailcall_prog()
> never runs, because the tailcall 'jmp' skips the fentry on
> tailcall_prog().
> 
> And when fentry is attached to tailcall_subprog(), fentry has to
> propagate tail_call_cnt properly.
> 
> In conclusion, that current program must be a subprog of tgt prog.

The verifier propagates the info about tail call usage through the
whole call chain of a given prog, so it doesn't really matter to me
where we attach fentry progs. All I'm saying is:

	if (tgt_prog && tgt_prog->aux->tail_call_reachable)
		tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;

should be just fine. I might be missing something, but with the above
your selftest does not hang my system.

> 
> Thanks,
> Leon
>
Leon Hwang Aug. 31, 2023, 1:12 p.m. UTC | #4
On 2023/8/31 06:49, Maciej Fijalkowski wrote:
> On Sat, Aug 26, 2023 at 12:03:12PM +0800, Leon Hwang wrote:
>>
>>
>> On 2023/8/26 01:58, Maciej Fijalkowski wrote:
>>> On Fri, Aug 25, 2023 at 10:52:15PM +0800, Leon Hwang wrote:
>>>> Since commit ebf7d1f508a73871 ("bpf, x64: rework pro/epilogue and tailcall
>>>> handling in JIT"), tailcalls on x64 work better than before.
>>>>
>>>> Since commit e411901c0b775a3a ("bpf: allow for tailcalls in BPF subprograms
>>>> for x64 JIT"), tailcalls can run in BPF subprograms on x64.
>>>>
>>>> Since commit 5b92a28aae4dd0f8 ("bpf: Support attaching tracing BPF program
>>>> to other BPF programs"), a BPF program can trace other BPF programs.
>>>>
>>>> How about combining them all together?
>>>>
>>>> 1. FENTRY/FEXIT on a BPF subprogram.
>>>> 2. A tailcall runs in the BPF subprogram.
>>>> 3. The tailcall calls itself.
>>>
>>> I would be interested in seeing broken asm code TBH :)
>>>
>>>>
>>>> As a result, a tailcall infinite loop comes up, and the loop halts the
>>>> machine.
>>>>
>>>> In tail call context, the tail_call_cnt propagates between BPF subprograms
>>>> via the stack and the rax register. So do the same in trampolines.
>>>>
>>>> Signed-off-by: Leon Hwang <hffilwlqm@gmail.com>
>>>> ---
>>>>  arch/x86/net/bpf_jit_comp.c | 32 ++++++++++++++++++++++++++------
>>>>  include/linux/bpf.h         |  5 +++++
>>>>  kernel/bpf/trampoline.c     |  4 ++--
>>>>  kernel/bpf/verifier.c       | 30 +++++++++++++++++++++++-------
>>>>  4 files changed, 56 insertions(+), 15 deletions(-)
>>>>

[SNIP]

>>>>  
>>>> +	if (tgt_prog && tgt_prog->aux->tail_call_reachable) {
>>>> +		subprog = find_subprog_index(tgt_prog, btf_id);
>>>> +		tr->flags = subprog > 0 ? BPF_TRAMP_F_TAIL_CALL_CTX : 0;
>>>> +	}
>>>
>>> I kinda forgot trampoline internals so please bear with me.
>>> Here you are checking actually...what? That current program is a subprog
>>> of tgt prog? My knee jerk reaction would be to propagate the
>>> BPF_TRAMP_F_TAIL_CALL_CTX based on just tail_call_reachable, but I need
>>> some more time to get my head around it again, sorry :<
>>
>> Yeah, that current program must be a subprog of tgt prog.
>>
>> For example:
>>
>> tailcall_subprog() {
>>   bpf_tail_call_static(&jmp_table, 0);
>> }
>>
>> tailcall_prog() {
>>   tailcall_subprog();
>> }
>>
>> prog() {
>>   bpf_tail_call_static(&jmp_table, 0);
>> }
>>
>> jmp_table is populated with tailcall_prog().
>>
>> When fentry is attached to prog(), there's no tail_call_cnt for fentry
>> to propagate. As we can see in emit_prologue(), fentry runs before
>> tail_call_cnt is initialised.
>>
>> What about fentry on tailcall_prog()? No, fentry on tailcall_prog()
>> never runs, because the tailcall 'jmp' skips the fentry on
>> tailcall_prog().
>>
>> And when fentry is attached to tailcall_subprog(), fentry has to
>> propagate tail_call_cnt properly.
>>
>> In conclusion, that current program must be a subprog of tgt prog.
> 
> The verifier propagates the info about tail call usage through the
> whole call chain of a given prog, so it doesn't really matter to me
> where we attach fentry progs. All I'm saying is:
> 
> 	if (tgt_prog && tgt_prog->aux->tail_call_reachable)
> 		tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;
> 
> should be just fine. I might be missing something, but with the above
> your selftest does not hang my system.

I think it's unnecessary to propagate the tail call usage info when
fentry is attached to prog(), which is the entry of the whole tail call
context. If we do propagate in this case, two extra instructions are
executed for nothing.

I confirm that the above selftest is able to hang a VM. I copied
test_progs along with tailcall*.bpf.o to another VM, which is Ubuntu
22.04.3 with kernel 5.15.0-82-generic, then ran ./test_progs -t
tailcalls, and the VM hung.

Here's the Ubuntu 22.04.3 VM info:
# uname -a
Linux hwang 5.15.0-82-generic #91-Ubuntu SMP Mon Aug 14 14:14:14 UTC
2023 x86_64 x86_64 x86_64 GNU/Linux

Thanks,
Leon
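
(The two extra instructions are the pair the flag makes the trampoline
emit, condensed here from the patch below:

	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
		EMIT1(0x50);		/* push rax: cache tail_call_cnt */
	...
	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
		/* mov rax, qword ptr [rbp - stack_size - 8] */
		RESTORE_TAIL_CALL_CNT(stack_size);

i.e. an early save of rax and a restore right before control transfers to
the original program.)
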
Leon Hwang Aug. 31, 2023, 2:44 p.m. UTC | #5
On 2023/8/31 21:12, Leon Hwang wrote:
> 
> 
> On 2023/8/31 06:49, Maciej Fijalkowski wrote:
>> On Sat, Aug 26, 2023 at 12:03:12PM +0800, Leon Hwang wrote:
>>>
>>>
>>> On 2023/8/26 01:58, Maciej Fijalkowski wrote:
>>>> On Fri, Aug 25, 2023 at 10:52:15PM +0800, Leon Hwang wrote:
>>>>> Since commit ebf7d1f508a73871 ("bpf, x64: rework pro/epilogue and tailcall
>>>>> handling in JIT"), tailcalls on x64 work better than before.
>>>>>
>>>>> Since commit e411901c0b775a3a ("bpf: allow for tailcalls in BPF subprograms
>>>>> for x64 JIT"), tailcalls can run in BPF subprograms on x64.
>>>>>
>>>>> Since commit 5b92a28aae4dd0f8 ("bpf: Support attaching tracing BPF program
>>>>> to other BPF programs"), a BPF program can trace other BPF programs.
>>>>>
>>>>> How about combining them all together?
>>>>>
>>>>> 1. FENTRY/FEXIT on a BPF subprogram.
>>>>> 2. A tailcall runs in the BPF subprogram.
>>>>> 3. The tailcall calls itself.
>>>>
>>>> I would be interested in seeing broken asm code TBH :)
>>>>
>>>>>
>>>>> As a result, a tailcall infinite loop comes up, and the loop halts the
>>>>> machine.
>>>>>
>>>>> In tail call context, the tail_call_cnt propagates between BPF subprograms
>>>>> via the stack and the rax register. So do the same in trampolines.
>>>>>
>>>>> Signed-off-by: Leon Hwang <hffilwlqm@gmail.com>
>>>>> ---
>>>>>  arch/x86/net/bpf_jit_comp.c | 32 ++++++++++++++++++++++++++------
>>>>>  include/linux/bpf.h         |  5 +++++
>>>>>  kernel/bpf/trampoline.c     |  4 ++--
>>>>>  kernel/bpf/verifier.c       | 30 +++++++++++++++++++++++-------
>>>>>  4 files changed, 56 insertions(+), 15 deletions(-)
>>>>>
> 
> [SNIP]
> 
>>>>>  
>>>>> +	if (tgt_prog && tgt_prog->aux->tail_call_reachable) {
>>>>> +		subprog = find_subprog_index(tgt_prog, btf_id);
>>>>> +		tr->flags = subprog > 0 ? BPF_TRAMP_F_TAIL_CALL_CTX : 0;
>>>>> +	}
>>>>
>>>> I kinda forgot trampoline internals so please bear with me.
>>>> Here you are checking actually...what? That current program is a subprog
>>>> of tgt prog? My knee jerk reaction would be to propagate the
>>>> BPF_TRAMP_F_TAIL_CALL_CTX based on just tail_call_reachable, but I need
>>>> some more time to get my head around it again, sorry :<
>>>
>>> Yeah, that current program must be a subprog of tgt prog.
>>>
>>> For example:
>>>
>>> tailcall_subprog() {
>>>   bpf_tail_call_static(&jmp_table, 0);
>>> }
>>>
>>> tailcall_prog() {
>>>   tailcall_subprog();
>>> }
>>>
>>> prog() {
>>>   bpf_tail_call_static(&jmp_table, 0);
>>> }
>>>
>>> jmp_table is populated with tailcall_prog().
>>>
>>> When fentry is attached to prog(), there's no tail_call_cnt for fentry
>>> to propagate. As we can see in emit_prologue(), fentry runs before
>>> tail_call_cnt is initialised.
>>>
>>> What about fentry on tailcall_prog()? No, fentry on tailcall_prog()
>>> never runs, because the tailcall 'jmp' skips the fentry on
>>> tailcall_prog().
>>>
>>> And when fentry is attached to tailcall_subprog(), fentry has to
>>> propagate tail_call_cnt properly.
>>>
>>> In conclusion, that current program must be a subprog of tgt prog.
>>
>> The verifier propagates the info about tail call usage through the
>> whole call chain of a given prog, so it doesn't really matter to me
>> where we attach fentry progs. All I'm saying is:
>>
>> 	if (tgt_prog && tgt_prog->aux->tail_call_reachable)
>> 		tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;
>>
>> should be just fine. I might be missing something, but with the above
>> your selftest does not hang my system.
> 
> I think it's unnecessary to propagate the tail call usage info when
> fentry is attached to prog(), which is the entry of the whole tail call
> context. If we do propagate in this case, two extra instructions are
> executed for nothing.

Since it's harmless, I agree with you. I'll change it to

 	if (tgt_prog && tgt_prog->aux->tail_call_reachable)
 		tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;

With this update, it's easier to understand BPF_TRAMP_F_TAIL_CALL_CTX.

> 
> I confirm that the above selftest is able to hang a VM. I copied
> test_progs along with tailcall*.bpf.o to another VM, which is Ubuntu
> 22.04.3 with kernel 5.15.0-82-generic, then ran ./test_progs -t
> tailcalls, and the VM hung.
> 
> Here's the Ubuntu 22.04.3 VM info:
> # uname -a
> Linux hwang 5.15.0-82-generic #91-Ubuntu SMP Mon Aug 14 14:14:14 UTC
> 2023 x86_64 x86_64 x86_64 GNU/Linux

What I suggest here is to run the selftest in the second patch, not the
above example.

Thanks,
Leon
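
For reference, the simplification agreed on above would presumably reduce
the new hunk in check_attach_btf_id() to the following (a sketch of the v4
direction; the patch below still carries the subprog check):

	/* Keying only on tail_call_reachable is enough per the discussion;
	 * setting the flag for the entry prog as well is harmless.
	 */
	if (tgt_prog && tgt_prog->aux->tail_call_reachable)
		tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;

	prog->aux->dst_trampoline = tr;
	return 0;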

Patch

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a5930042139d3..2846c21d75bfa 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -303,8 +303,12 @@  static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
 	prog += X86_PATCH_SIZE;
 	if (!ebpf_from_cbpf) {
 		if (tail_call_reachable && !is_subprog)
+			/* When it's the entry of the whole tailcall context,
+			 * zeroing rax means initialising tail_call_cnt.
+			 */
 			EMIT2(0x31, 0xC0); /* xor eax, eax */
 		else
+			/* Keep the same instruction layout. */
 			EMIT2(0x66, 0x90); /* nop2 */
 	}
 	EMIT1(0x55);             /* push rbp */
@@ -1018,6 +1022,10 @@  static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
 
 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
 
+/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
+#define RESTORE_TAIL_CALL_CNT(stack)				\
+	EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
+
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
 		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
 {
@@ -1623,9 +1631,7 @@  st:			if (is_imm8(insn->off))
 
 			func = (u8 *) __bpf_call_base + imm32;
 			if (tail_call_reachable) {
-				/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
-				EMIT3_off32(0x48, 0x8B, 0x85,
-					    -round_up(bpf_prog->aux->stack_depth, 8) - 8);
+				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
 				if (!imm32)
 					return -EINVAL;
 				offs = 7 + x86_call_depth_emit_accounting(&prog, func);
@@ -2400,6 +2406,7 @@  int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	 *                     [ ...        ]
 	 *                     [ stack_arg2 ]
 	 * RBP - arg_stack_off [ stack_arg1 ]
+	 * RSP                 [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
 	 */
 
 	/* room for return value of orig_call or fentry prog */
@@ -2464,6 +2471,8 @@  int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	else
 		/* sub rsp, stack_size */
 		EMIT4(0x48, 0x83, 0xEC, stack_size);
+	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+		EMIT1(0x50);		/* push rax */
 	/* mov QWORD PTR [rbp - rbx_off], rbx */
 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
 
@@ -2516,9 +2525,15 @@  int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 		restore_regs(m, &prog, regs_off);
 		save_args(m, &prog, arg_stack_off, true);
 
+		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+			/* Before calling the original function, restore the
+			 * tail_call_cnt from stack to rax.
+			 */
+			RESTORE_TAIL_CALL_CNT(stack_size);
+
 		if (flags & BPF_TRAMP_F_ORIG_STACK) {
-			emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
-			EMIT2(0xff, 0xd0); /* call *rax */
+			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
+			EMIT2(0xff, 0xd3); /* call *rbx */
 		} else {
 			/* call original function */
 			if (emit_rsb_call(&prog, orig_call, prog)) {
@@ -2569,7 +2584,12 @@  int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 			ret = -EINVAL;
 			goto cleanup;
 		}
-	}
+	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
+		/* Before running the original function, restore the
+		 * tail_call_cnt from stack to rax.
+		 */
+		RESTORE_TAIL_CALL_CNT(stack_size);
+
 	/* restore return value of orig_call or fentry prog back into RAX */
 	if (save_ret)
 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index cfabbcf47bdb8..c8df257ea435d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1028,6 +1028,11 @@  struct btf_func_model {
  */
 #define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)
 
+/* Indicate that current trampoline is in a tail call context. Then, it has to
+ * cache and restore tail_call_cnt to avoid infinite tail call loop.
+ */
+#define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)
+
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86.
  */
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 78acf28d48732..16ab5da7161f2 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -415,8 +415,8 @@  static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
 		goto out;
 	}
 
-	/* clear all bits except SHARE_IPMODIFY */
-	tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;
+	/* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
+	tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
 
 	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
 	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4ccca1f6c9981..6f290bc6f5f19 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -19246,6 +19246,21 @@  static int check_non_sleepable_error_inject(u32 btf_id)
 	return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
 }
 
+static inline int find_subprog_index(const struct bpf_prog *prog,
+				     u32 btf_id)
+{
+	struct bpf_prog_aux *aux = prog->aux;
+	int i, subprog = -1;
+
+	for (i = 0; i < aux->func_info_cnt; i++)
+		if (aux->func_info[i].type_id == btf_id) {
+			subprog = i;
+			break;
+		}
+
+	return subprog;
+}
+
 int bpf_check_attach_target(struct bpf_verifier_log *log,
 			    const struct bpf_prog *prog,
 			    const struct bpf_prog *tgt_prog,
@@ -19254,9 +19269,9 @@  int bpf_check_attach_target(struct bpf_verifier_log *log,
 {
 	bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
 	const char prefix[] = "btf_trace_";
-	int ret = 0, subprog = -1, i;
 	const struct btf_type *t;
 	bool conservative = true;
+	int ret = 0, subprog;
 	const char *tname;
 	struct btf *btf;
 	long addr = 0;
@@ -19291,11 +19306,7 @@  int bpf_check_attach_target(struct bpf_verifier_log *log,
 			return -EINVAL;
 		}
 
-		for (i = 0; i < aux->func_info_cnt; i++)
-			if (aux->func_info[i].type_id == btf_id) {
-				subprog = i;
-				break;
-			}
+		subprog = find_subprog_index(tgt_prog, btf_id);
 		if (subprog == -1) {
 			bpf_log(log, "Subprog %s doesn't exist\n", tname);
 			return -EINVAL;
@@ -19559,7 +19570,7 @@  static int check_attach_btf_id(struct bpf_verifier_env *env)
 	struct bpf_attach_target_info tgt_info = {};
 	u32 btf_id = prog->aux->attach_btf_id;
 	struct bpf_trampoline *tr;
-	int ret;
+	int ret, subprog;
 	u64 key;
 
 	if (prog->type == BPF_PROG_TYPE_SYSCALL) {
@@ -19629,6 +19640,11 @@  static int check_attach_btf_id(struct bpf_verifier_env *env)
 	if (!tr)
 		return -ENOMEM;
 
+	if (tgt_prog && tgt_prog->aux->tail_call_reachable) {
+		subprog = find_subprog_index(tgt_prog, btf_id);
+		tr->flags = subprog > 0 ? BPF_TRAMP_F_TAIL_CALL_CTX : 0;
+	}
+
 	prog->aux->dst_trampoline = tr;
 	return 0;
 }