Message ID   | 20220202135333.190761-3-jolsa@kernel.org
-------------|------------------------------------------
State        | Changes Requested
Delegated to | BPF
Series       | bpf: Add fprobe link
Context                | Check   | Description
-----------------------|---------|---------------------------------------------------------
bpf/vmtest-bpf-next    | fail    | VM_Test
bpf/vmtest-bpf-next-PR | fail    | PR summary
netdev/tree_selection  | success | Guessing tree name failed - patch did not apply, async
On Wed, Feb 2, 2022 at 5:53 AM Jiri Olsa <jolsa@redhat.com> wrote:
>
> Adding support to call the get_func_ip_fprobe helper from kprobe
> programs attached by fprobe link.
>
> Also adding support to inline it, because it's a single load
> instruction.
>
> Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> ---
>  kernel/bpf/verifier.c    | 19 ++++++++++++++++++-
>  kernel/trace/bpf_trace.c | 16 +++++++++++++++-
>  2 files changed, 33 insertions(+), 2 deletions(-)
>
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 1ae41d0cf96c..a745ded00635 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -13625,7 +13625,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
>                          continue;
>                  }
>
> -                /* Implement bpf_get_func_ip inline. */
> +                /* Implement tracing bpf_get_func_ip inline. */
>                  if (prog_type == BPF_PROG_TYPE_TRACING &&
>                      insn->imm == BPF_FUNC_get_func_ip) {
>                          /* Load IP address from ctx - 16 */
> @@ -13640,6 +13640,23 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
>                          continue;
>                  }
>
> +                /* Implement kprobe/fprobe bpf_get_func_ip inline. */
> +                if (prog_type == BPF_PROG_TYPE_KPROBE &&
> +                    eatype == BPF_TRACE_FPROBE &&
> +                    insn->imm == BPF_FUNC_get_func_ip) {
> +                        /* Load IP address from ctx (struct pt_regs) ip */
> +                        insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
> +                                                  offsetof(struct pt_regs, ip));

Isn't this architecture-specific? I'm starting to dislike this whole
inlining more and more. It's just a complication in the verifier
without clear real-world benefits. We are clearly prematurely
optimizing here. In practice you'll just call bpf_get_func_ip() once
and that's it. Function call overhead will be negligible compared to
other *useful* work you'll be doing in your BPF program.

> +
> +                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
> +                        if (!new_prog)
> +                                return -ENOMEM;
> +
> +                        env->prog = prog = new_prog;
> +                        insn = new_prog->insnsi + i + delta;
> +                        continue;
> +                }
> +
>  patch_call_imm:
>                  fn = env->ops->get_func_proto(insn->imm, env->prog);
>                  /* all functions that have prototype and verifier allowed
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index a2024ba32a20..28e59e31e3db 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -1036,6 +1036,19 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
>          .arg1_type      = ARG_PTR_TO_CTX,
>  };
>
> +BPF_CALL_1(bpf_get_func_ip_fprobe, struct pt_regs *, regs)
> +{
> +        /* This helper call is inlined by verifier. */
> +        return regs->ip;
> +}
> +
> +static const struct bpf_func_proto bpf_get_func_ip_proto_fprobe = {
> +        .func           = bpf_get_func_ip_fprobe,
> +        .gpl_only       = false,
> +        .ret_type       = RET_INTEGER,
> +        .arg1_type      = ARG_PTR_TO_CTX,
> +};
> +
>  BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
>  {
>          struct bpf_trace_run_ctx *run_ctx;
> @@ -1279,7 +1292,8 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>                  return &bpf_override_return_proto;
>  #endif
>          case BPF_FUNC_get_func_ip:
> -                return &bpf_get_func_ip_proto_kprobe;
> +                return prog->expected_attach_type == BPF_TRACE_FPROBE ?
> +                        &bpf_get_func_ip_proto_fprobe : &bpf_get_func_ip_proto_kprobe;
>          case BPF_FUNC_get_attach_cookie:
>                  return &bpf_get_attach_cookie_proto_trace;
>          default:
> --
> 2.34.1
>
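For reference, typical usage of the helper looks like the minimal
sketch below: it is called once on entry to identify which traced
function fired. This is illustrative only; the SEC("fprobe/...")
section name is a hypothetical stand-in for whatever convention the
libbpf side of this series defines, and is not part of this patch.

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char LICENSE[] SEC("license") = "GPL";

    /* Hypothetical section name for a kprobe program attached via
     * fprobe link; the actual naming comes from the libbpf patches. */
    SEC("fprobe/ksys_read")
    int trace_entry(struct pt_regs *ctx)
    {
            /* One call per program invocation, as discussed above. */
            __u64 ip = bpf_get_func_ip(ctx);

            bpf_printk("traced function entered at %llx", ip);
            return 0;
    }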
On Mon, Feb 7, 2022 at 10:59 AM Andrii Nakryiko <andrii.nakryiko@gmail.com> wrote:
>
> On Wed, Feb 2, 2022 at 5:53 AM Jiri Olsa <jolsa@redhat.com> wrote:
> >
> > Adding support to call the get_func_ip_fprobe helper from kprobe
> > programs attached by fprobe link.
> >
> > Also adding support to inline it, because it's a single load
> > instruction.
> >
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> >  kernel/bpf/verifier.c    | 19 ++++++++++++++++++-
> >  kernel/trace/bpf_trace.c | 16 +++++++++++++++-
> >  2 files changed, 33 insertions(+), 2 deletions(-)
> >
> > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> > index 1ae41d0cf96c..a745ded00635 100644
> > --- a/kernel/bpf/verifier.c
> > +++ b/kernel/bpf/verifier.c
> > @@ -13625,7 +13625,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
> >                          continue;
> >                  }
> >
> > -                /* Implement bpf_get_func_ip inline. */
> > +                /* Implement tracing bpf_get_func_ip inline. */
> >                  if (prog_type == BPF_PROG_TYPE_TRACING &&
> >                      insn->imm == BPF_FUNC_get_func_ip) {
> >                          /* Load IP address from ctx - 16 */
> > @@ -13640,6 +13640,23 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
> >                          continue;
> >                  }
> >
> > +                /* Implement kprobe/fprobe bpf_get_func_ip inline. */
> > +                if (prog_type == BPF_PROG_TYPE_KPROBE &&
> > +                    eatype == BPF_TRACE_FPROBE &&
> > +                    insn->imm == BPF_FUNC_get_func_ip) {
> > +                        /* Load IP address from ctx (struct pt_regs) ip */
> > +                        insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
> > +                                                  offsetof(struct pt_regs, ip));
>
> Isn't this architecture-specific? I'm starting to dislike this whole
> inlining more and more. It's just a complication in the verifier
> without clear real-world benefits. We are clearly prematurely
> optimizing here. In practice you'll just call bpf_get_func_ip() once
> and that's it. Function call overhead will be negligible compared to
> other *useful* work you'll be doing in your BPF program.

We should be doing inlining when we can. Every bit of performance
matters.
On Mon, Feb 07, 2022 at 10:59:18AM -0800, Andrii Nakryiko wrote:
> On Wed, Feb 2, 2022 at 5:53 AM Jiri Olsa <jolsa@redhat.com> wrote:
> >
> > Adding support to call the get_func_ip_fprobe helper from kprobe
> > programs attached by fprobe link.
> >
> > Also adding support to inline it, because it's a single load
> > instruction.
> >
> > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > ---
> >  kernel/bpf/verifier.c    | 19 ++++++++++++++++++-
> >  kernel/trace/bpf_trace.c | 16 +++++++++++++++-
> >  2 files changed, 33 insertions(+), 2 deletions(-)
> >
> > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> > index 1ae41d0cf96c..a745ded00635 100644
> > --- a/kernel/bpf/verifier.c
> > +++ b/kernel/bpf/verifier.c
> > @@ -13625,7 +13625,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
> >                          continue;
> >                  }
> >
> > -                /* Implement bpf_get_func_ip inline. */
> > +                /* Implement tracing bpf_get_func_ip inline. */
> >                  if (prog_type == BPF_PROG_TYPE_TRACING &&
> >                      insn->imm == BPF_FUNC_get_func_ip) {
> >                          /* Load IP address from ctx - 16 */
> > @@ -13640,6 +13640,23 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
> >                          continue;
> >                  }
> >
> > +                /* Implement kprobe/fprobe bpf_get_func_ip inline. */
> > +                if (prog_type == BPF_PROG_TYPE_KPROBE &&
> > +                    eatype == BPF_TRACE_FPROBE &&
> > +                    insn->imm == BPF_FUNC_get_func_ip) {
> > +                        /* Load IP address from ctx (struct pt_regs) ip */
> > +                        insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
> > +                                                  offsetof(struct pt_regs, ip));
>
> Isn't this architecture-specific? I'm starting to dislike this

ugh, it is.. I'm not sure we want #ifdef CONFIG_X86 in here,
or some arch_* specific function?

jirka

> whole inlining more and more. It's just a complication in the verifier
> without clear real-world benefits. We are clearly prematurely
> optimizing here. In practice you'll just call bpf_get_func_ip() once
> and that's it. Function call overhead will be negligible compared to
> other *useful* work you'll be doing in your BPF program.
>
> > +
> > +                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
> > +                        if (!new_prog)
> > +                                return -ENOMEM;
> > +
> > +                        env->prog = prog = new_prog;
> > +                        insn = new_prog->insnsi + i + delta;
> > +                        continue;
> > +                }
> > +
> >  patch_call_imm:
> >                  fn = env->ops->get_func_proto(insn->imm, env->prog);
> >                  /* all functions that have prototype and verifier allowed
> > diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> > index a2024ba32a20..28e59e31e3db 100644
> > --- a/kernel/trace/bpf_trace.c
> > +++ b/kernel/trace/bpf_trace.c
> > @@ -1036,6 +1036,19 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
> >          .arg1_type      = ARG_PTR_TO_CTX,
> >  };
> >
> > +BPF_CALL_1(bpf_get_func_ip_fprobe, struct pt_regs *, regs)
> > +{
> > +        /* This helper call is inlined by verifier. */
> > +        return regs->ip;
> > +}
> > +
> > +static const struct bpf_func_proto bpf_get_func_ip_proto_fprobe = {
> > +        .func           = bpf_get_func_ip_fprobe,
> > +        .gpl_only       = false,
> > +        .ret_type       = RET_INTEGER,
> > +        .arg1_type      = ARG_PTR_TO_CTX,
> > +};
> > +
> >  BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
> >  {
> >          struct bpf_trace_run_ctx *run_ctx;
> > @@ -1279,7 +1292,8 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> >                  return &bpf_override_return_proto;
> >  #endif
> >          case BPF_FUNC_get_func_ip:
> > -                return &bpf_get_func_ip_proto_kprobe;
> > +                return prog->expected_attach_type == BPF_TRACE_FPROBE ?
> > +                        &bpf_get_func_ip_proto_fprobe : &bpf_get_func_ip_proto_kprobe;
> >          case BPF_FUNC_get_attach_cookie:
> >                  return &bpf_get_attach_cookie_proto_trace;
> >          default:
> > --
> > 2.34.1
> >
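One arch-neutral alternative, sketched here under the assumption that
the helper stays a real call rather than being inlined, is to use the
generic instruction_pointer() accessor that every architecture already
defines, instead of touching pt_regs->ip directly:

    #include <linux/ptrace.h>        /* instruction_pointer() */

    BPF_CALL_1(bpf_get_func_ip_fprobe, struct pt_regs *, regs)
    {
            /* instruction_pointer() hides the per-arch pt_regs layout
             * (ip on x86, pc on arm64, and so on), keeping the helper
             * body portable. The trade-off is that the verifier can no
             * longer rewrite the call into one fixed-offset load. */
            return instruction_pointer(regs);
    }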
On Wed, Feb 9, 2022 at 7:01 AM Jiri Olsa <olsajiri@gmail.com> wrote:
>
> On Mon, Feb 07, 2022 at 10:59:18AM -0800, Andrii Nakryiko wrote:
> > On Wed, Feb 2, 2022 at 5:53 AM Jiri Olsa <jolsa@redhat.com> wrote:
> > >
> > > Adding support to call the get_func_ip_fprobe helper from kprobe
> > > programs attached by fprobe link.
> > >
> > > Also adding support to inline it, because it's a single load
> > > instruction.
> > >
> > > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > > ---
> > >  kernel/bpf/verifier.c    | 19 ++++++++++++++++++-
> > >  kernel/trace/bpf_trace.c | 16 +++++++++++++++-
> > >  2 files changed, 33 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> > > index 1ae41d0cf96c..a745ded00635 100644
> > > --- a/kernel/bpf/verifier.c
> > > +++ b/kernel/bpf/verifier.c
> > > @@ -13625,7 +13625,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
> > >                          continue;
> > >                  }
> > >
> > > -                /* Implement bpf_get_func_ip inline. */
> > > +                /* Implement tracing bpf_get_func_ip inline. */
> > >                  if (prog_type == BPF_PROG_TYPE_TRACING &&
> > >                      insn->imm == BPF_FUNC_get_func_ip) {
> > >                          /* Load IP address from ctx - 16 */
> > > @@ -13640,6 +13640,23 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
> > >                          continue;
> > >                  }
> > >
> > > +                /* Implement kprobe/fprobe bpf_get_func_ip inline. */
> > > +                if (prog_type == BPF_PROG_TYPE_KPROBE &&
> > > +                    eatype == BPF_TRACE_FPROBE &&
> > > +                    insn->imm == BPF_FUNC_get_func_ip) {
> > > +                        /* Load IP address from ctx (struct pt_regs) ip */
> > > +                        insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
> > > +                                                  offsetof(struct pt_regs, ip));
> >
> > Isn't this architecture-specific? I'm starting to dislike this
>
> ugh, it is.. I'm not sure we want #ifdef CONFIG_X86 in here,
> or some arch_* specific function?

So not inlining it isn't even considered? This function will be called
once or at most a few times per BPF program invocation. Anyone calling
it in a tight loop is going to use it very-very suboptimally (and even
then useful program logic will dominate). There is no point in
inlining it.

>
> jirka
>
> > whole inlining more and more. It's just a complication in the verifier
> > without clear real-world benefits. We are clearly prematurely
> > optimizing here. In practice you'll just call bpf_get_func_ip() once
> > and that's it. Function call overhead will be negligible compared to
> > other *useful* work you'll be doing in your BPF program.
> >
> > > +
> > > +                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
> > > +                        if (!new_prog)
> > > +                                return -ENOMEM;
> > > +
> > > +                        env->prog = prog = new_prog;
> > > +                        insn = new_prog->insnsi + i + delta;
> > > +                        continue;
> > > +                }
> > > +
> > >  patch_call_imm:
> > >                  fn = env->ops->get_func_proto(insn->imm, env->prog);
> > >                  /* all functions that have prototype and verifier allowed
> > > diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> > > index a2024ba32a20..28e59e31e3db 100644
> > > --- a/kernel/trace/bpf_trace.c
> > > +++ b/kernel/trace/bpf_trace.c
> > > @@ -1036,6 +1036,19 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
> > >          .arg1_type      = ARG_PTR_TO_CTX,
> > >  };
> > >
> > > +BPF_CALL_1(bpf_get_func_ip_fprobe, struct pt_regs *, regs)
> > > +{
> > > +        /* This helper call is inlined by verifier. */
> > > +        return regs->ip;
> > > +}
> > > +
> > > +static const struct bpf_func_proto bpf_get_func_ip_proto_fprobe = {
> > > +        .func           = bpf_get_func_ip_fprobe,
> > > +        .gpl_only       = false,
> > > +        .ret_type       = RET_INTEGER,
> > > +        .arg1_type      = ARG_PTR_TO_CTX,
> > > +};
> > > +
> > >  BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
> > >  {
> > >          struct bpf_trace_run_ctx *run_ctx;
> > > @@ -1279,7 +1292,8 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> > >                  return &bpf_override_return_proto;
> > >  #endif
> > >          case BPF_FUNC_get_func_ip:
> > > -                return &bpf_get_func_ip_proto_kprobe;
> > > +                return prog->expected_attach_type == BPF_TRACE_FPROBE ?
> > > +                        &bpf_get_func_ip_proto_fprobe : &bpf_get_func_ip_proto_kprobe;
> > >          case BPF_FUNC_get_attach_cookie:
> > >                  return &bpf_get_attach_cookie_proto_trace;
> > >          default:
> > > --
> > > 2.34.1
> > >
On Wed, Feb 09, 2022 at 08:05:05AM -0800, Andrii Nakryiko wrote:
> On Wed, Feb 9, 2022 at 7:01 AM Jiri Olsa <olsajiri@gmail.com> wrote:
> >
> > On Mon, Feb 07, 2022 at 10:59:18AM -0800, Andrii Nakryiko wrote:
> > > On Wed, Feb 2, 2022 at 5:53 AM Jiri Olsa <jolsa@redhat.com> wrote:
> > > >
> > > > Adding support to call the get_func_ip_fprobe helper from kprobe
> > > > programs attached by fprobe link.
> > > >
> > > > Also adding support to inline it, because it's a single load
> > > > instruction.
> > > >
> > > > Signed-off-by: Jiri Olsa <jolsa@kernel.org>
> > > > ---
> > > >  kernel/bpf/verifier.c    | 19 ++++++++++++++++++-
> > > >  kernel/trace/bpf_trace.c | 16 +++++++++++++++-
> > > >  2 files changed, 33 insertions(+), 2 deletions(-)
> > > >
> > > > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> > > > index 1ae41d0cf96c..a745ded00635 100644
> > > > --- a/kernel/bpf/verifier.c
> > > > +++ b/kernel/bpf/verifier.c
> > > > @@ -13625,7 +13625,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
> > > >                          continue;
> > > >                  }
> > > >
> > > > -                /* Implement bpf_get_func_ip inline. */
> > > > +                /* Implement tracing bpf_get_func_ip inline. */
> > > >                  if (prog_type == BPF_PROG_TYPE_TRACING &&
> > > >                      insn->imm == BPF_FUNC_get_func_ip) {
> > > >                          /* Load IP address from ctx - 16 */
> > > > @@ -13640,6 +13640,23 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
> > > >                          continue;
> > > >                  }
> > > >
> > > > +                /* Implement kprobe/fprobe bpf_get_func_ip inline. */
> > > > +                if (prog_type == BPF_PROG_TYPE_KPROBE &&
> > > > +                    eatype == BPF_TRACE_FPROBE &&
> > > > +                    insn->imm == BPF_FUNC_get_func_ip) {
> > > > +                        /* Load IP address from ctx (struct pt_regs) ip */
> > > > +                        insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
> > > > +                                                  offsetof(struct pt_regs, ip));
> > >
> > > Isn't this architecture-specific? I'm starting to dislike this
> >
> > ugh, it is.. I'm not sure we want #ifdef CONFIG_X86 in here,
> > or some arch_* specific function?
>
> So not inlining it isn't even considered? This function will be called
> once or at most a few times per BPF program invocation. Anyone calling
> it in a tight loop is going to use it very-very suboptimally (and even
> then useful program logic will dominate). There is no point in
> inlining it.

I agree that given its usage pattern there won't be too much gain, on
the other hand it's simple verifier code changing call/load/ret into a
simple load, so I thought why not.. also there are just a few helpers
we can inline so easily, but yea.. I can't think of any sane usage of
this helper that inlining would matter for.. which doesn't mean there
isn't one ;-)

jirka

> >
> > jirka
> >
> > > whole inlining more and more. It's just a complication in the verifier
> > > without clear real-world benefits. We are clearly prematurely
> > > optimizing here. In practice you'll just call bpf_get_func_ip() once
> > > and that's it. Function call overhead will be negligible compared to
> > > other *useful* work you'll be doing in your BPF program.
> >
> > > > +
> > > > +                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
> > > > +                        if (!new_prog)
> > > > +                                return -ENOMEM;
> > > > +
> > > > +                        env->prog = prog = new_prog;
> > > > +                        insn = new_prog->insnsi + i + delta;
> > > > +                        continue;
> > > > +                }
> > > > +
> > > >  patch_call_imm:
> > > >                  fn = env->ops->get_func_proto(insn->imm, env->prog);
> > > >                  /* all functions that have prototype and verifier allowed
> > > > diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> > > > index a2024ba32a20..28e59e31e3db 100644
> > > > --- a/kernel/trace/bpf_trace.c
> > > > +++ b/kernel/trace/bpf_trace.c
> > > > @@ -1036,6 +1036,19 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
> > > >          .arg1_type      = ARG_PTR_TO_CTX,
> > > >  };
> > > >
> > > > +BPF_CALL_1(bpf_get_func_ip_fprobe, struct pt_regs *, regs)
> > > > +{
> > > > +        /* This helper call is inlined by verifier. */
> > > > +        return regs->ip;
> > > > +}
> > > > +
> > > > +static const struct bpf_func_proto bpf_get_func_ip_proto_fprobe = {
> > > > +        .func           = bpf_get_func_ip_fprobe,
> > > > +        .gpl_only       = false,
> > > > +        .ret_type       = RET_INTEGER,
> > > > +        .arg1_type      = ARG_PTR_TO_CTX,
> > > > +};
> > > > +
> > > >  BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
> > > >  {
> > > >          struct bpf_trace_run_ctx *run_ctx;
> > > > @@ -1279,7 +1292,8 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> > > >                  return &bpf_override_return_proto;
> > > >  #endif
> > > >          case BPF_FUNC_get_func_ip:
> > > > -                return &bpf_get_func_ip_proto_kprobe;
> > > > +                return prog->expected_attach_type == BPF_TRACE_FPROBE ?
> > > > +                        &bpf_get_func_ip_proto_fprobe : &bpf_get_func_ip_proto_kprobe;
> > > >          case BPF_FUNC_get_attach_cookie:
> > > >                  return &bpf_get_attach_cookie_proto_trace;
> > > >          default:
> > > > --
> > > > 2.34.1
> > > >
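If the inlining were kept, the #ifdef idea floated above could look
roughly like the sketch below: emit the fixed-offset load only on
architectures where the pt_regs layout is known, and let everything
else fall through to the ordinary helper call. This is a sketch of one
option, not what the patch implements.

    /* Implement kprobe/fprobe bpf_get_func_ip inline (x86 only). */
    if (prog_type == BPF_PROG_TYPE_KPROBE &&
        eatype == BPF_TRACE_FPROBE &&
        insn->imm == BPF_FUNC_get_func_ip) {
    #if defined(CONFIG_X86)
            /* Rewrite the helper call into a single load:
             *   r0 = *(u64 *)(r1 + offsetof(struct pt_regs, ip))
             */
            insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
                                      offsetof(struct pt_regs, ip));

            new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
            if (!new_prog)
                    return -ENOMEM;

            env->prog = prog = new_prog;
            insn = new_prog->insnsi + i + delta;
            continue;
    #endif
            /* On other architectures fall through to patch_call_imm,
             * which keeps the regular helper call. */
    }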
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 1ae41d0cf96c..a745ded00635 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -13625,7 +13625,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                         continue;
                 }
 
-                /* Implement bpf_get_func_ip inline. */
+                /* Implement tracing bpf_get_func_ip inline. */
                 if (prog_type == BPF_PROG_TYPE_TRACING &&
                     insn->imm == BPF_FUNC_get_func_ip) {
                         /* Load IP address from ctx - 16 */
@@ -13640,6 +13640,23 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                         continue;
                 }
 
+                /* Implement kprobe/fprobe bpf_get_func_ip inline. */
+                if (prog_type == BPF_PROG_TYPE_KPROBE &&
+                    eatype == BPF_TRACE_FPROBE &&
+                    insn->imm == BPF_FUNC_get_func_ip) {
+                        /* Load IP address from ctx (struct pt_regs) ip */
+                        insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                                                  offsetof(struct pt_regs, ip));
+
+                        new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
+                        if (!new_prog)
+                                return -ENOMEM;
+
+                        env->prog = prog = new_prog;
+                        insn = new_prog->insnsi + i + delta;
+                        continue;
+                }
+
 patch_call_imm:
                 fn = env->ops->get_func_proto(insn->imm, env->prog);
                 /* all functions that have prototype and verifier allowed
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index a2024ba32a20..28e59e31e3db 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1036,6 +1036,19 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
         .arg1_type      = ARG_PTR_TO_CTX,
 };
 
+BPF_CALL_1(bpf_get_func_ip_fprobe, struct pt_regs *, regs)
+{
+        /* This helper call is inlined by verifier. */
+        return regs->ip;
+}
+
+static const struct bpf_func_proto bpf_get_func_ip_proto_fprobe = {
+        .func           = bpf_get_func_ip_fprobe,
+        .gpl_only       = false,
+        .ret_type       = RET_INTEGER,
+        .arg1_type      = ARG_PTR_TO_CTX,
+};
+
 BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
 {
         struct bpf_trace_run_ctx *run_ctx;
@@ -1279,7 +1292,8 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                 return &bpf_override_return_proto;
 #endif
         case BPF_FUNC_get_func_ip:
-                return &bpf_get_func_ip_proto_kprobe;
+                return prog->expected_attach_type == BPF_TRACE_FPROBE ?
+                        &bpf_get_func_ip_proto_fprobe : &bpf_get_func_ip_proto_kprobe;
         case BPF_FUNC_get_attach_cookie:
                 return &bpf_get_attach_cookie_proto_trace;
         default:
Adding support to call the get_func_ip_fprobe helper from kprobe
programs attached by fprobe link.

Also adding support to inline it, because it's a single load
instruction.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
 kernel/bpf/verifier.c    | 19 ++++++++++++++++++-
 kernel/trace/bpf_trace.c | 16 +++++++++++++++-
 2 files changed, 33 insertions(+), 2 deletions(-)
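To make "it's a single load instruction" concrete, here is the effect
of the verifier rewrite at the BPF instruction level (a sketch; per the
BPF calling convention, r1-r5 are argument/scratch registers and r0
holds the return value):

    /* Before do_misc_fixups(), the program contains a helper call:
     *
     *   call bpf_get_func_ip        // r1 = ctx (struct pt_regs *),
     *                               // r0 = result, r1-r5 clobbered
     *
     * After the rewrite, the call becomes a single load:
     *
     *   r0 = *(u64 *)(r1 + offsetof(struct pt_regs, ip))
     *
     * which is what BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, ...)
     * in the patch encodes, avoiding the call overhead and the
     * clobbering of r1-r5.
     */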