Message ID | AM6PR03MB50805D6F4B8710EDB304CF5C99F72@AM6PR03MB5080.eurprd03.prod.outlook.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | BPF |
Headers | show |
Series | bpf, sched_ext: Make kfunc filters support struct_ops context to reduce runtime overhead | expand |
Hi Juntong, On Wed, Feb 05, 2025 at 07:30:14PM +0000, Juntong Deng wrote: > This patch adds filter for scx_kfunc_ids_select_cpu. > > The kfuncs in the scx_kfunc_ids_select_cpu set can be used in select_cpu > and other rq-locked operations. The only function in scx_kfunc_ids_select_cpu is scx_bpf_select_cpu_dfl(), which should be called exclusively from ops.select_cpu() and not from any rq-locked ops. > > Signed-off-by: Juntong Deng <juntong.deng@outlook.com> > --- > kernel/sched/ext.c | 42 ++++++++++++++++++++++++++++++++++++++++++ > 1 file changed, 42 insertions(+) > > diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c > index 8857c0709bdd..c92949aa23f6 100644 > --- a/kernel/sched/ext.c > +++ b/kernel/sched/ext.c > @@ -6401,9 +6401,51 @@ BTF_KFUNCS_START(scx_kfunc_ids_select_cpu) > BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU) > BTF_KFUNCS_END(scx_kfunc_ids_select_cpu) > > +static int scx_kfunc_ids_other_rqlocked_filter(const struct bpf_prog *prog, u32 kfunc_id) > +{ > + u32 moff = prog->aux->attach_st_ops_member_off; > + > + if (moff == offsetof(struct sched_ext_ops, runnable) || > + moff == offsetof(struct sched_ext_ops, dequeue) || > + moff == offsetof(struct sched_ext_ops, stopping) || > + moff == offsetof(struct sched_ext_ops, quiescent) || > + moff == offsetof(struct sched_ext_ops, yield) || > + moff == offsetof(struct sched_ext_ops, cpu_acquire) || > + moff == offsetof(struct sched_ext_ops, running) || > + moff == offsetof(struct sched_ext_ops, core_sched_before) || > + moff == offsetof(struct sched_ext_ops, set_cpumask) || > + moff == offsetof(struct sched_ext_ops, update_idle) || > + moff == offsetof(struct sched_ext_ops, tick) || > + moff == offsetof(struct sched_ext_ops, enable) || > + moff == offsetof(struct sched_ext_ops, set_weight) || > + moff == offsetof(struct sched_ext_ops, disable) || > + moff == offsetof(struct sched_ext_ops, exit_task) || > + moff == offsetof(struct sched_ext_ops, dump_task) || > + moff == offsetof(struct sched_ext_ops, dump_cpu)) > + return 0; > + > + return -EACCES; > +} > + > +static int scx_kfunc_ids_select_cpu_filter(const struct bpf_prog *prog, u32 kfunc_id) > +{ > + u32 moff; > + > + if (!btf_id_set8_contains(&scx_kfunc_ids_select_cpu, kfunc_id) || > + prog->aux->st_ops != &bpf_sched_ext_ops) > + return 0; > + > + moff = prog->aux->attach_st_ops_member_off; > + if (moff == offsetof(struct sched_ext_ops, select_cpu)) > + return 0; > + > + return scx_kfunc_ids_other_rqlocked_filter(prog, kfunc_id); So, I think we just need to return -EACCES here. > +} > + > static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = { > .owner = THIS_MODULE, > .set = &scx_kfunc_ids_select_cpu, > + .filter = scx_kfunc_ids_select_cpu_filter, > }; > > static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags) > -- > 2.39.5 > Thanks, -Andrea
On Wed, Feb 05, 2025 at 07:30:14PM +0000, Juntong Deng wrote: ... > +static int scx_kfunc_ids_other_rqlocked_filter(const struct bpf_prog *prog, u32 kfunc_id) > +{ > + u32 moff = prog->aux->attach_st_ops_member_off; > + > + if (moff == offsetof(struct sched_ext_ops, runnable) || > + moff == offsetof(struct sched_ext_ops, dequeue) || > + moff == offsetof(struct sched_ext_ops, stopping) || > + moff == offsetof(struct sched_ext_ops, quiescent) || > + moff == offsetof(struct sched_ext_ops, yield) || > + moff == offsetof(struct sched_ext_ops, cpu_acquire) || > + moff == offsetof(struct sched_ext_ops, running) || > + moff == offsetof(struct sched_ext_ops, core_sched_before) || > + moff == offsetof(struct sched_ext_ops, set_cpumask) || > + moff == offsetof(struct sched_ext_ops, update_idle) || > + moff == offsetof(struct sched_ext_ops, tick) || > + moff == offsetof(struct sched_ext_ops, enable) || > + moff == offsetof(struct sched_ext_ops, set_weight) || > + moff == offsetof(struct sched_ext_ops, disable) || > + moff == offsetof(struct sched_ext_ops, exit_task) || > + moff == offsetof(struct sched_ext_ops, dump_task) || > + moff == offsetof(struct sched_ext_ops, dump_cpu)) > + return 0; > + > + return -EACCES; Actually, do we need this filter at all? I think the other filters in your patch set should be sufficient to establish the correct permissions for all kfuncs, as none of them need to be called from any rq-locked operations. Or am I missing something? -Andrea
On 2025/2/6 23:39, Andrea Righi wrote: > On Wed, Feb 05, 2025 at 07:30:14PM +0000, Juntong Deng wrote: > ... >> +static int scx_kfunc_ids_other_rqlocked_filter(const struct bpf_prog *prog, u32 kfunc_id) >> +{ >> + u32 moff = prog->aux->attach_st_ops_member_off; >> + >> + if (moff == offsetof(struct sched_ext_ops, runnable) || >> + moff == offsetof(struct sched_ext_ops, dequeue) || >> + moff == offsetof(struct sched_ext_ops, stopping) || >> + moff == offsetof(struct sched_ext_ops, quiescent) || >> + moff == offsetof(struct sched_ext_ops, yield) || >> + moff == offsetof(struct sched_ext_ops, cpu_acquire) || >> + moff == offsetof(struct sched_ext_ops, running) || >> + moff == offsetof(struct sched_ext_ops, core_sched_before) || >> + moff == offsetof(struct sched_ext_ops, set_cpumask) || >> + moff == offsetof(struct sched_ext_ops, update_idle) || >> + moff == offsetof(struct sched_ext_ops, tick) || >> + moff == offsetof(struct sched_ext_ops, enable) || >> + moff == offsetof(struct sched_ext_ops, set_weight) || >> + moff == offsetof(struct sched_ext_ops, disable) || >> + moff == offsetof(struct sched_ext_ops, exit_task) || >> + moff == offsetof(struct sched_ext_ops, dump_task) || >> + moff == offsetof(struct sched_ext_ops, dump_cpu)) >> + return 0; >> + >> + return -EACCES; > > Actually, do we need this filter at all? > > I think the other filters in your patch set should be sufficient to > establish the correct permissions for all kfuncs, as none of them need to > be called from any rq-locked operations. Or am I missing something? > Thanks for your reply. I think I misunderstood SCX_KF_REST. I incorrectly thought that all but SCX_KF_UNLOCKED belonged to SCX_KF_REST (including SCX_KF_CPU_RELEASE, SCX_KF_DISPATCH, etc.). I will remove scx_kfunc_ids_other_rqlocked_filter in the next version. If you find any other mistakes, please let me know. > -Andrea
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 8857c0709bdd..c92949aa23f6 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -6401,9 +6401,51 @@ BTF_KFUNCS_START(scx_kfunc_ids_select_cpu) BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU) BTF_KFUNCS_END(scx_kfunc_ids_select_cpu) +static int scx_kfunc_ids_other_rqlocked_filter(const struct bpf_prog *prog, u32 kfunc_id) +{ + u32 moff = prog->aux->attach_st_ops_member_off; + + if (moff == offsetof(struct sched_ext_ops, runnable) || + moff == offsetof(struct sched_ext_ops, dequeue) || + moff == offsetof(struct sched_ext_ops, stopping) || + moff == offsetof(struct sched_ext_ops, quiescent) || + moff == offsetof(struct sched_ext_ops, yield) || + moff == offsetof(struct sched_ext_ops, cpu_acquire) || + moff == offsetof(struct sched_ext_ops, running) || + moff == offsetof(struct sched_ext_ops, core_sched_before) || + moff == offsetof(struct sched_ext_ops, set_cpumask) || + moff == offsetof(struct sched_ext_ops, update_idle) || + moff == offsetof(struct sched_ext_ops, tick) || + moff == offsetof(struct sched_ext_ops, enable) || + moff == offsetof(struct sched_ext_ops, set_weight) || + moff == offsetof(struct sched_ext_ops, disable) || + moff == offsetof(struct sched_ext_ops, exit_task) || + moff == offsetof(struct sched_ext_ops, dump_task) || + moff == offsetof(struct sched_ext_ops, dump_cpu)) + return 0; + + return -EACCES; +} + +static int scx_kfunc_ids_select_cpu_filter(const struct bpf_prog *prog, u32 kfunc_id) +{ + u32 moff; + + if (!btf_id_set8_contains(&scx_kfunc_ids_select_cpu, kfunc_id) || + prog->aux->st_ops != &bpf_sched_ext_ops) + return 0; + + moff = prog->aux->attach_st_ops_member_off; + if (moff == offsetof(struct sched_ext_ops, select_cpu)) + return 0; + + return scx_kfunc_ids_other_rqlocked_filter(prog, kfunc_id); +} + static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = { .owner = THIS_MODULE, .set = &scx_kfunc_ids_select_cpu, + .filter = scx_kfunc_ids_select_cpu_filter, }; 
static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags)
This patch adds a filter for scx_kfunc_ids_select_cpu. The kfuncs in the scx_kfunc_ids_select_cpu set can be used in select_cpu and other rq-locked operations. Signed-off-by: Juntong Deng <juntong.deng@outlook.com> --- kernel/sched/ext.c | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+)