| Message ID | 20241127111020.1738105-2-elver@google.com (mailing list archive) |
|---|---|
| State | Superseded |
| Series | [bpf-next,v2,1/2] bpf: Remove bpf_probe_write_user() warning message |
On Wed, Nov 27, 2024 at 12:10:01PM +0100, Marco Elver wrote:
> With bpf_get_probe_write_proto() no longer printing a message, we can
> avoid it being a special case with its own permission check.
>
> Refactor bpf_tracing_func_proto() similar to bpf_base_func_proto() to
> have a section conditional on bpf_token_capable(CAP_SYS_ADMIN), where
> the proto for bpf_probe_write_user() is returned. Finally, remove the
> unnecessary bpf_get_probe_write_proto().
>
> This simplifies the code, and adding additional CAP_SYS_ADMIN-only
> helpers in future avoids duplicating the same CAP_SYS_ADMIN check.
>
> Suggested-by: Andrii Nakryiko <andrii@kernel.org>
> Signed-off-by: Marco Elver <elver@google.com>
> ---
> v2:
> * New patch.

Acked-by: Jiri Olsa <jolsa@kernel.org>

jirka

> ---
>  kernel/trace/bpf_trace.c | 30 ++++++++++++++++++------------
>  1 file changed, 18 insertions(+), 12 deletions(-)
>
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index 0ab56af2e298..d312b77993dc 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -357,14 +357,6 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = {
>  	.arg3_type	= ARG_CONST_SIZE,
>  };
>
> -static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
> -{
> -	if (!capable(CAP_SYS_ADMIN))
> -		return NULL;
> -
> -	return &bpf_probe_write_user_proto;
> -}
> -
>  #define MAX_TRACE_PRINTK_VARARGS	3
>  #define BPF_TRACE_PRINTK_SIZE		1024
>
> @@ -1417,6 +1409,12 @@ late_initcall(bpf_key_sig_kfuncs_init);
>  static const struct bpf_func_proto *
>  bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>  {
> +	const struct bpf_func_proto *func_proto;
> +
> +	func_proto = bpf_base_func_proto(func_id, prog);
> +	if (func_proto)
> +		return func_proto;
> +
>  	switch (func_id) {
>  	case BPF_FUNC_map_lookup_elem:
>  		return &bpf_map_lookup_elem_proto;
> @@ -1458,9 +1456,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>  		return &bpf_perf_event_read_proto;
>  	case BPF_FUNC_get_prandom_u32:
>  		return &bpf_get_prandom_u32_proto;
> -	case BPF_FUNC_probe_write_user:
> -		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
> -		       NULL : bpf_get_probe_write_proto();
>  	case BPF_FUNC_probe_read_user:
>  		return &bpf_probe_read_user_proto;
>  	case BPF_FUNC_probe_read_kernel:
> @@ -1539,7 +1534,18 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>  	case BPF_FUNC_trace_vprintk:
>  		return bpf_get_trace_vprintk_proto();
>  	default:
> -		return bpf_base_func_proto(func_id, prog);
> +		break;
> +	}
> +
> +	if (!bpf_token_capable(prog->aux->token, CAP_SYS_ADMIN))
> +		return NULL;
> +
> +	switch (func_id) {
> +	case BPF_FUNC_probe_write_user:
> +		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
> +		       NULL : &bpf_probe_write_user_proto;
> +	default:
> +		return NULL;
>  	}
>  }
>
> --
> 2.47.0.338.g60cca15819-goog
>
On Wed, 27 Nov 2024 at 12:10, Marco Elver <elver@google.com> wrote:
>
> With bpf_get_probe_write_proto() no longer printing a message, we can
> avoid it being a special case with its own permission check.
>
> Refactor bpf_tracing_func_proto() similar to bpf_base_func_proto() to
> have a section conditional on bpf_token_capable(CAP_SYS_ADMIN), where
> the proto for bpf_probe_write_user() is returned. Finally, remove the
> unnecessary bpf_get_probe_write_proto().
>
> This simplifies the code, and adding additional CAP_SYS_ADMIN-only
> helpers in future avoids duplicating the same CAP_SYS_ADMIN check.
>
> Suggested-by: Andrii Nakryiko <andrii@kernel.org>
> Signed-off-by: Marco Elver <elver@google.com>
> ---
> v2:
> * New patch.
> ---
>  kernel/trace/bpf_trace.c | 30 ++++++++++++++++++------------
>  1 file changed, 18 insertions(+), 12 deletions(-)
>
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index 0ab56af2e298..d312b77993dc 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -357,14 +357,6 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = {
>  	.arg3_type	= ARG_CONST_SIZE,
>  };
>
> -static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
> -{
> -	if (!capable(CAP_SYS_ADMIN))
> -		return NULL;
> -
> -	return &bpf_probe_write_user_proto;
> -}
> -
>  #define MAX_TRACE_PRINTK_VARARGS	3
>  #define BPF_TRACE_PRINTK_SIZE		1024
>
> @@ -1417,6 +1409,12 @@ late_initcall(bpf_key_sig_kfuncs_init);
>  static const struct bpf_func_proto *
>  bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>  {
> +	const struct bpf_func_proto *func_proto;
> +
> +	func_proto = bpf_base_func_proto(func_id, prog);
> +	if (func_proto)
> +		return func_proto;

As indicated by the patch robot failure, we can't move this call up; it
needs to remain the last call after all the others, because we may
override a function proto from bpf_base_func_proto() here (as is done
for BPF_FUNC_get_smp_processor_id).

Let me fix that.
On 11/27/24 1:06 PM, Marco Elver wrote:
> On Wed, 27 Nov 2024 at 12:10, Marco Elver <elver@google.com> wrote:
>>
>> With bpf_get_probe_write_proto() no longer printing a message, we can
>> avoid it being a special case with its own permission check.
>>
>> Refactor bpf_tracing_func_proto() similar to bpf_base_func_proto() to
>> have a section conditional on bpf_token_capable(CAP_SYS_ADMIN), where
>> the proto for bpf_probe_write_user() is returned. Finally, remove the
>> unnecessary bpf_get_probe_write_proto().
>>
>> This simplifies the code, and adding additional CAP_SYS_ADMIN-only
>> helpers in future avoids duplicating the same CAP_SYS_ADMIN check.
>>
>> Suggested-by: Andrii Nakryiko <andrii@kernel.org>
>> Signed-off-by: Marco Elver <elver@google.com>
>> ---
>> v2:
>> * New patch.
>> ---
>>  kernel/trace/bpf_trace.c | 30 ++++++++++++++++++------------
>>  1 file changed, 18 insertions(+), 12 deletions(-)
>>
>> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
>> index 0ab56af2e298..d312b77993dc 100644
>> --- a/kernel/trace/bpf_trace.c
>> +++ b/kernel/trace/bpf_trace.c
>> @@ -357,14 +357,6 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = {
>>  	.arg3_type	= ARG_CONST_SIZE,
>>  };
>>
>> -static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
>> -{
>> -	if (!capable(CAP_SYS_ADMIN))
>> -		return NULL;
>> -
>> -	return &bpf_probe_write_user_proto;
>> -}
>> -
>>  #define MAX_TRACE_PRINTK_VARARGS	3
>>  #define BPF_TRACE_PRINTK_SIZE		1024
>>
>> @@ -1417,6 +1409,12 @@ late_initcall(bpf_key_sig_kfuncs_init);
>>  static const struct bpf_func_proto *
>>  bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>>  {
>> +	const struct bpf_func_proto *func_proto;
>> +
>> +	func_proto = bpf_base_func_proto(func_id, prog);
>> +	if (func_proto)
>> +		return func_proto;
>
> As indicated by the patch robot failure, we can't move this call up; it
> needs to remain the last call after all the others, because we may
> override a function proto from bpf_base_func_proto() here (as is done
> for BPF_FUNC_get_smp_processor_id).
>
> Let me fix that.

I was about to comment on that; I would leave this as it was before.
Otherwise the rest lgtm.
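To make the agreed fix concrete, here is a minimal sketch of how the reordered logic could look in a revised version of the patch. This is an assumption based on the discussion above, not the actual v3: it keeps the tracing-specific switch first (so its overrides of base helpers, such as BPF_FUNC_get_smp_processor_id, still take precedence), falls back to bpf_base_func_proto() only afterwards, and leaves the CAP_SYS_ADMIN-gated section last, reusing only identifiers that appear in the v2 diff below.

```c
/*
 * Hypothetical sketch only, not the actual v3 patch. It assumes the fix
 * Marco describes: tracing-specific protos first, then the base fallback,
 * then the CAP_SYS_ADMIN-only helpers behind a single token check.
 */
static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *func_proto;

	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	/* ... all other tracing-specific cases, unchanged ... */
	default:
		break;
	}

	/* Last, so the tracing-specific overrides above keep winning. */
	func_proto = bpf_base_func_proto(func_id, prog);
	if (func_proto)
		return func_proto;

	/* Everything below requires CAP_SYS_ADMIN (via the BPF token). */
	if (!bpf_token_capable(prog->aux->token, CAP_SYS_ADMIN))
		return NULL;

	switch (func_id) {
	case BPF_FUNC_probe_write_user:
		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
		       NULL : &bpf_probe_write_user_proto;
	default:
		return NULL;
	}
}
```

Whatever exact shape the follow-up takes, the point of the refactor stands: a single capability check guards all CAP_SYS_ADMIN-only helpers, so adding more of them later does not duplicate the check.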
```diff
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 0ab56af2e298..d312b77993dc 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -357,14 +357,6 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = {
 	.arg3_type	= ARG_CONST_SIZE,
 };
 
-static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
-{
-	if (!capable(CAP_SYS_ADMIN))
-		return NULL;
-
-	return &bpf_probe_write_user_proto;
-}
-
 #define MAX_TRACE_PRINTK_VARARGS	3
 #define BPF_TRACE_PRINTK_SIZE		1024
 
@@ -1417,6 +1409,12 @@ late_initcall(bpf_key_sig_kfuncs_init);
 static const struct bpf_func_proto *
 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
+	const struct bpf_func_proto *func_proto;
+
+	func_proto = bpf_base_func_proto(func_id, prog);
+	if (func_proto)
+		return func_proto;
+
 	switch (func_id) {
 	case BPF_FUNC_map_lookup_elem:
 		return &bpf_map_lookup_elem_proto;
@@ -1458,9 +1456,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_perf_event_read_proto;
 	case BPF_FUNC_get_prandom_u32:
 		return &bpf_get_prandom_u32_proto;
-	case BPF_FUNC_probe_write_user:
-		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
-		       NULL : bpf_get_probe_write_proto();
 	case BPF_FUNC_probe_read_user:
 		return &bpf_probe_read_user_proto;
 	case BPF_FUNC_probe_read_kernel:
@@ -1539,7 +1534,18 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_trace_vprintk:
 		return bpf_get_trace_vprintk_proto();
 	default:
-		return bpf_base_func_proto(func_id, prog);
+		break;
+	}
+
+	if (!bpf_token_capable(prog->aux->token, CAP_SYS_ADMIN))
+		return NULL;
+
+	switch (func_id) {
+	case BPF_FUNC_probe_write_user:
+		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
+		       NULL : &bpf_probe_write_user_proto;
+	default:
+		return NULL;
 	}
 }
```
With bpf_get_probe_write_proto() no longer printing a message, we can
avoid it being a special case with its own permission check.

Refactor bpf_tracing_func_proto() similar to bpf_base_func_proto() to
have a section conditional on bpf_token_capable(CAP_SYS_ADMIN), where
the proto for bpf_probe_write_user() is returned. Finally, remove the
unnecessary bpf_get_probe_write_proto().

This simplifies the code, and adding additional CAP_SYS_ADMIN-only
helpers in future avoids duplicating the same CAP_SYS_ADMIN check.

Suggested-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Marco Elver <elver@google.com>
---
v2:
* New patch.
---
 kernel/trace/bpf_trace.c | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)