Message ID | 20240702121944.1091530-2-pulehui@huaweicloud.com (mailing list archive)
---|---
State | Handled Elsewhere, archived
Series | Add 12-argument support for RV64 bpf trampoline

Context | Check | Description
---|---|---
conchuod/vmtest-fixes-PR | fail | merge-conflict
Pu Lehui <pulehui@huaweicloud.com> writes:

> From: Pu Lehui <pulehui@huawei.com>
>
> This patch adds 12 function arguments support for riscv64 bpf
> trampoline. The current bpf trampoline supports <= sizeof(u64) bytes
> scalar arguments [0] and <= 16 bytes struct arguments [1]. Therefore, we
> focus on the situation where scalars are at most XLEN bits and
> aggregates whose total size does not exceed 2×XLEN bits in the riscv
> calling convention [2].
>
> Link: https://elixir.bootlin.com/linux/v6.8/source/kernel/bpf/btf.c#L6184 [0]
> Link: https://elixir.bootlin.com/linux/v6.8/source/kernel/bpf/btf.c#L6769 [1]
> Link: https://github.com/riscv-non-isa/riscv-elf-psabi-doc/releases/download/draft-20230929-e5c800e661a53efe3c2678d71a306323b60eb13b/riscv-abi.pdf [2]
>
> Signed-off-by: Pu Lehui <pulehui@huawei.com>
> Acked-by: Björn Töpel <bjorn@kernel.org>
> Reviewed-by: Björn Töpel <bjorn@rivosinc.com>

Acked-by: Puranjay Mohan <puranjay@kernel.org>

Thanks,
Puranjay
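For context, the per-argument slot accounting the patch introduces (nr_arg_slots += round_up(arg_size, 8) / 8) can be exercised with a small user-space sketch. This is a minimal illustration, not kernel code: the reduced func_model struct, the ROUND_UP_8 macro, and the argument sizes below are hypothetical stand-ins for the kernel's struct btf_func_model and round_up().

    #include <stdio.h>

    /* Hypothetical, reduced stand-in for the kernel's struct btf_func_model:
     * only the two fields the slot calculation reads.
     */
    struct func_model {
            int nr_args;
            int arg_size[12];
    };

    /* same result as the kernel's round_up(x, 8) for positive sizes */
    #define ROUND_UP_8(x) (((x) + 7) / 8 * 8)

    int main(void)
    {
            /* 11 arguments: ten XLEN-sized scalars plus one 16-byte aggregate,
             * which the riscv psABI passes in two slots -> 12 slots total.
             */
            struct func_model m = {
                    .nr_args = 11,
                    .arg_size = { 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 16 },
            };
            int i, nr_arg_slots = 0;

            for (i = 0; i < m.nr_args; i++)
                    nr_arg_slots += ROUND_UP_8(m.arg_size[i]) / 8;

            /* the first 8 slots travel in a0-a7, the remainder on the stack */
            printf("slots=%d in-regs=%d on-stack=%d\n", nr_arg_slots,
                   nr_arg_slots < 8 ? nr_arg_slots : 8,
                   nr_arg_slots > 8 ? nr_arg_slots - 8 : 0);
            return 0;
    }

Run against the sizes above it reports 12 slots, 8 in registers and 4 on the stack, which is exactly the split that store_args() in the patch has to handle.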
Pu Lehui <pulehui@huaweicloud.com> writes:

> From: Pu Lehui <pulehui@huawei.com>
>
> This patch adds 12 function arguments support for riscv64 bpf
> trampoline. The current bpf trampoline supports <= sizeof(u64) bytes
> scalar arguments [0] and <= 16 bytes struct arguments [1]. Therefore, we
> focus on the situation where scalars are at most XLEN bits and
> aggregates whose total size does not exceed 2×XLEN bits in the riscv
> calling convention [2].
>
> Link: https://elixir.bootlin.com/linux/v6.8/source/kernel/bpf/btf.c#L6184 [0]
> Link: https://elixir.bootlin.com/linux/v6.8/source/kernel/bpf/btf.c#L6769 [1]
> Link: https://github.com/riscv-non-isa/riscv-elf-psabi-doc/releases/download/draft-20230929-e5c800e661a53efe3c2678d71a306323b60eb13b/riscv-abi.pdf [2]
>
> Signed-off-by: Pu Lehui <pulehui@huawei.com>
> Acked-by: Björn Töpel <bjorn@kernel.org>
> Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
> ---
>  arch/riscv/net/bpf_jit_comp64.c | 66 +++++++++++++++++++++++----------
>  1 file changed, 47 insertions(+), 19 deletions(-)
>
> diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
> index 351e1484205e..685c7389ae7e 100644
> --- a/arch/riscv/net/bpf_jit_comp64.c
> +++ b/arch/riscv/net/bpf_jit_comp64.c
> @@ -15,6 +15,7 @@
>  #include <asm/percpu.h>
>  #include "bpf_jit.h"
>
> +#define RV_MAX_REG_ARGS 8
>  #define RV_FENTRY_NINSNS 2
>  /* imm that allows emit_imm to emit max count insns */
>  #define RV_MAX_COUNT_IMM 0x7FFF7FF7FF7FF7FF
> @@ -692,26 +693,45 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
>  	return ret;
>  }
>
> -static void store_args(int nregs, int args_off, struct rv_jit_context *ctx)
> +static void store_args(int nr_arg_slots, int args_off, struct rv_jit_context *ctx)
>  {
>  	int i;
>
> -	for (i = 0; i < nregs; i++) {
> -		emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx);
> +	for (i = 0; i < nr_arg_slots; i++) {
> +		if (i < RV_MAX_REG_ARGS) {
> +			emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx);
> +		} else {
> +			/* skip slots for T0 and FP of traced function */
> +			emit_ld(RV_REG_T1, 16 + (i - RV_MAX_REG_ARGS) * 8, RV_REG_FP, ctx);
> +			emit_sd(RV_REG_FP, -args_off, RV_REG_T1, ctx);
> +		}
>  		args_off -= 8;
>  	}
>  }
>
> -static void restore_args(int nregs, int args_off, struct rv_jit_context *ctx)
> +static void restore_args(int nr_reg_args, int args_off, struct rv_jit_context *ctx)
>  {
>  	int i;
>
> -	for (i = 0; i < nregs; i++) {
> +	for (i = 0; i < nr_reg_args; i++) {
>  		emit_ld(RV_REG_A0 + i, -args_off, RV_REG_FP, ctx);
>  		args_off -= 8;
>  	}
>  }
>
> +static void restore_stack_args(int nr_stack_args, int args_off, int stk_arg_off,
> +			       struct rv_jit_context *ctx)
> +{
> +	int i;
> +
> +	for (i = 0; i < nr_stack_args; i++) {
> +		emit_ld(RV_REG_T1, -(args_off - RV_MAX_REG_ARGS * 8), RV_REG_FP, ctx);
> +		emit_sd(RV_REG_FP, -stk_arg_off, RV_REG_T1, ctx);
> +		args_off -= 8;
> +		stk_arg_off -= 8;
> +	}
> +}
> +
>  static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off,
>  			   int run_ctx_off, bool save_ret, struct rv_jit_context *ctx)
>  {
> @@ -784,8 +804,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
>  {
>  	int i, ret, offset;
>  	int *branches_off = NULL;
> -	int stack_size = 0, nregs = m->nr_args;
> -	int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off;
> +	int stack_size = 0, nr_arg_slots = 0;
> +	int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off, stk_arg_off;
>  	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
>  	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
>  	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
> @@ -831,20 +851,21 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
>  	 * FP - sreg_off    [ callee saved reg  ]
>  	 *
>  	 *		    [ pads              ] pads for 16 bytes alignment
> +	 *
> +	 *		    [ stack_argN        ]
> +	 *		    [ ...               ]
> +	 * FP - stk_arg_off [ stack_arg1        ] BPF_TRAMP_F_CALL_ORIG
>  	 */
>
>  	if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
>  		return -ENOTSUPP;
>
> -	/* extra regiters for struct arguments */
> -	for (i = 0; i < m->nr_args; i++)
> -		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
> -			nregs += round_up(m->arg_size[i], 8) / 8 - 1;
> -
> -	/* 8 arguments passed by registers */
> -	if (nregs > 8)
> +	if (m->nr_args > MAX_BPF_FUNC_ARGS)
>  		return -ENOTSUPP;
>
> +	for (i = 0; i < m->nr_args; i++)
> +		nr_arg_slots += round_up(m->arg_size[i], 8) / 8;
> +
>  	/* room of trampoline frame to store return address and frame pointer */
>  	stack_size += 16;
>
> @@ -854,7 +875,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
>  		retval_off = stack_size;
>  	}
>
> -	stack_size += nregs * 8;
> +	stack_size += nr_arg_slots * 8;
>  	args_off = stack_size;
>
>  	stack_size += 8;
> @@ -871,8 +892,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
>  	stack_size += 8;
>  	sreg_off = stack_size;
>
> +	if (nr_arg_slots - RV_MAX_REG_ARGS > 0)
> +		stack_size += (nr_arg_slots - RV_MAX_REG_ARGS) * 8;

Hi Pu,

Although this is merged now, while working on this for arm64 I realised
that the above doesn't check for BPF_TRAMP_F_CALL_ORIG and can waste
some stack space; we should change this to:

if ((flags & BPF_TRAMP_F_CALL_ORIG) && (nr_arg_slots - RV_MAX_REG_ARGS > 0))
	stack_size += (nr_arg_slots - RV_MAX_REG_ARGS) * 8;

It will save some stack space when BPF_TRAMP_F_CALL_ORIG is not set?

I can send a patch if you think this is worth fixing.

Thanks,
Puranjay
On 2024/7/5 20:51, Puranjay Mohan wrote:
> Pu Lehui <pulehui@huaweicloud.com> writes:
>
>> From: Pu Lehui <pulehui@huawei.com>
>>
>> This patch adds 12 function arguments support for riscv64 bpf
>> trampoline. The current bpf trampoline supports <= sizeof(u64) bytes
>> scalar arguments [0] and <= 16 bytes struct arguments [1]. Therefore, we
>> focus on the situation where scalars are at most XLEN bits and
>> aggregates whose total size does not exceed 2×XLEN bits in the riscv
>> calling convention [2].

[SNIP]

>> @@ -854,7 +875,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
>>  		retval_off = stack_size;
>>  	}
>>
>> -	stack_size += nregs * 8;
>> +	stack_size += nr_arg_slots * 8;
>>  	args_off = stack_size;
>>
>>  	stack_size += 8;
>> @@ -871,8 +892,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
>>  	stack_size += 8;
>>  	sreg_off = stack_size;
>>
>> +	if (nr_arg_slots - RV_MAX_REG_ARGS > 0)
>> +		stack_size += (nr_arg_slots - RV_MAX_REG_ARGS) * 8;
>
> Hi Pu,
>
> Although this is merged now, while working on this for arm64 I realised
> that the above doesn't check for BPF_TRAMP_F_CALL_ORIG and can waste
> some stack space; we should change this to:
>
> if ((flags & BPF_TRAMP_F_CALL_ORIG) && (nr_arg_slots - RV_MAX_REG_ARGS > 0))
> 	stack_size += (nr_arg_slots - RV_MAX_REG_ARGS) * 8;
>
> It will save some stack space when BPF_TRAMP_F_CALL_ORIG is not set?

Nice catch. It will be better. Feel free to patch it. Thanks!

> I can send a patch if you think this is worth fixing.
>
> Thanks,
> Puranjay
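To put a number on the saving discussed above, here is a small sketch of the sizing logic in isolation. stack_arg_bytes() is a hypothetical helper, not a kernel function; only the gated computation itself comes from Puranjay's suggestion, and RV_MAX_REG_ARGS mirrors the patch's constant.

    #include <stdio.h>

    #define RV_MAX_REG_ARGS 8

    /* Hypothetical helper modelling only the disputed computation: how many
     * bytes the reserved stack-argument area adds to the trampoline frame.
     */
    static int stack_arg_bytes(int nr_arg_slots, int call_orig)
    {
            if (call_orig && nr_arg_slots > RV_MAX_REG_ARGS)
                    return (nr_arg_slots - RV_MAX_REG_ARGS) * 8;
            return 0;
    }

    int main(void)
    {
            /* 12 slots, e.g. twelve XLEN-sized scalars: 4 slots overflow a0-a7 */
            printf("CALL_ORIG set:   +%d bytes\n", stack_arg_bytes(12, 1)); /* 32 */
            printf("CALL_ORIG clear: +%d bytes\n", stack_arg_bytes(12, 0)); /*  0 */
            return 0;
    }

With the merged code the 32 bytes are reserved unconditionally; gating on BPF_TRAMP_F_CALL_ORIG reclaims them whenever the trampoline never calls the original function.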
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 351e1484205e..685c7389ae7e 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -15,6 +15,7 @@
 #include <asm/percpu.h>
 #include "bpf_jit.h"
 
+#define RV_MAX_REG_ARGS 8
 #define RV_FENTRY_NINSNS 2
 /* imm that allows emit_imm to emit max count insns */
 #define RV_MAX_COUNT_IMM 0x7FFF7FF7FF7FF7FF
@@ -692,26 +693,45 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 	return ret;
 }
 
-static void store_args(int nregs, int args_off, struct rv_jit_context *ctx)
+static void store_args(int nr_arg_slots, int args_off, struct rv_jit_context *ctx)
 {
 	int i;
 
-	for (i = 0; i < nregs; i++) {
-		emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx);
+	for (i = 0; i < nr_arg_slots; i++) {
+		if (i < RV_MAX_REG_ARGS) {
+			emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx);
+		} else {
+			/* skip slots for T0 and FP of traced function */
+			emit_ld(RV_REG_T1, 16 + (i - RV_MAX_REG_ARGS) * 8, RV_REG_FP, ctx);
+			emit_sd(RV_REG_FP, -args_off, RV_REG_T1, ctx);
+		}
 		args_off -= 8;
 	}
 }
 
-static void restore_args(int nregs, int args_off, struct rv_jit_context *ctx)
+static void restore_args(int nr_reg_args, int args_off, struct rv_jit_context *ctx)
 {
 	int i;
 
-	for (i = 0; i < nregs; i++) {
+	for (i = 0; i < nr_reg_args; i++) {
 		emit_ld(RV_REG_A0 + i, -args_off, RV_REG_FP, ctx);
 		args_off -= 8;
 	}
 }
 
+static void restore_stack_args(int nr_stack_args, int args_off, int stk_arg_off,
+			       struct rv_jit_context *ctx)
+{
+	int i;
+
+	for (i = 0; i < nr_stack_args; i++) {
+		emit_ld(RV_REG_T1, -(args_off - RV_MAX_REG_ARGS * 8), RV_REG_FP, ctx);
+		emit_sd(RV_REG_FP, -stk_arg_off, RV_REG_T1, ctx);
+		args_off -= 8;
+		stk_arg_off -= 8;
+	}
+}
+
 static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off,
 			   int run_ctx_off, bool save_ret, struct rv_jit_context *ctx)
 {
@@ -784,8 +804,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 {
 	int i, ret, offset;
 	int *branches_off = NULL;
-	int stack_size = 0, nregs = m->nr_args;
-	int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off;
+	int stack_size = 0, nr_arg_slots = 0;
+	int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off, stk_arg_off;
 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
@@ -831,20 +851,21 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 	 * FP - sreg_off    [ callee saved reg  ]
 	 *
 	 *		    [ pads              ] pads for 16 bytes alignment
+	 *
+	 *		    [ stack_argN        ]
+	 *		    [ ...               ]
+	 * FP - stk_arg_off [ stack_arg1        ] BPF_TRAMP_F_CALL_ORIG
 	 */
 
 	if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
 		return -ENOTSUPP;
 
-	/* extra regiters for struct arguments */
-	for (i = 0; i < m->nr_args; i++)
-		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
-			nregs += round_up(m->arg_size[i], 8) / 8 - 1;
-
-	/* 8 arguments passed by registers */
-	if (nregs > 8)
+	if (m->nr_args > MAX_BPF_FUNC_ARGS)
 		return -ENOTSUPP;
 
+	for (i = 0; i < m->nr_args; i++)
+		nr_arg_slots += round_up(m->arg_size[i], 8) / 8;
+
 	/* room of trampoline frame to store return address and frame pointer */
 	stack_size += 16;
 
@@ -854,7 +875,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 		retval_off = stack_size;
 	}
 
-	stack_size += nregs * 8;
+	stack_size += nr_arg_slots * 8;
 	args_off = stack_size;
 
 	stack_size += 8;
@@ -871,8 +892,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 	stack_size += 8;
 	sreg_off = stack_size;
 
+	if (nr_arg_slots - RV_MAX_REG_ARGS > 0)
+		stack_size += (nr_arg_slots - RV_MAX_REG_ARGS) * 8;
+
 	stack_size = round_up(stack_size, STACK_ALIGN);
 
+	/* room for args on stack must be at the top of stack */
+	stk_arg_off = stack_size;
+
 	if (!is_struct_ops) {
 		/* For the trampoline called from function entry,
 		 * the frame of traced function and the frame of
@@ -908,10 +935,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 		emit_sd(RV_REG_FP, -ip_off, RV_REG_T1, ctx);
 	}
 
-	emit_li(RV_REG_T1, nregs, ctx);
+	emit_li(RV_REG_T1, nr_arg_slots, ctx);
 	emit_sd(RV_REG_FP, -nregs_off, RV_REG_T1, ctx);
 
-	store_args(nregs, args_off, ctx);
+	store_args(nr_arg_slots, args_off, ctx);
 
 	/* skip to actual body of traced function */
 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
@@ -951,7 +978,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 	}
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
-		restore_args(nregs, args_off, ctx);
+		restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx);
+		restore_stack_args(nr_arg_slots - RV_MAX_REG_ARGS, args_off, stk_arg_off, ctx);
 		ret = emit_call((const u64)orig_call, true, ctx);
 		if (ret)
 			goto out;
@@ -986,7 +1014,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 	}
 
 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
-		restore_args(nregs, args_off, ctx);
+		restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx);
 
 	if (save_ret) {
 		emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
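As a usage illustration of what the series enables, a libbpf-style fentry program reading all twelve arguments of a traced function might look like the sketch below. Everything here is hypothetical: demo_func is not a real kernel function, and attaching assumes the target has BTF and a signature within the limits the commit message describes; SEC(), BPF_PROG() and bpf_printk() are the usual libbpf helpers.

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    /* Hypothetical target: a kernel function taking twelve XLEN-sized scalars,
     * so arguments 9-12 arrive on the caller's stack per the riscv psABI and
     * are spilled/reloaded by the trampoline logic added in this series.
     */
    SEC("fentry/demo_func")
    int BPF_PROG(trace_demo, u64 a, u64 b, u64 c, u64 d, u64 e, u64 f,
                 u64 g, u64 h, u64 i, u64 j, u64 k, u64 l)
    {
            bpf_printk("arg12=%llu", l);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";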