| Message ID | 20250214194134.658939-7-arighi@nvidia.com (mailing list archive) |
|---|---|
| State | Not Applicable |
| Series | [1/8] nodemask: add nodes_copy() |
| Context | Check | Description |
|---|---|---|
| netdev/tree_selection | success | Not a local patch |
On Fri, Feb 14, 2025 at 08:40:05PM +0100, Andrea Righi wrote:

> Add the new scheduler flag SCX_OPS_BUILTIN_IDLE_PER_NODE, which allows
> BPF schedulers to select between using a global flat idle cpumask or
> multiple per-node cpumasks.
>
> This only introduces the flag and the mechanism to enable/disable this
> feature without affecting any scheduling behavior.
>
> Cc: Yury Norov [NVIDIA] <yury.norov@gmail.com>
> Signed-off-by: Andrea Righi <arighi@nvidia.com>

Reviewed-by: Yury Norov [NVIDIA] <yury.norov@gmail.com>
```diff
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 7c17e05ed15b1..330a359d79301 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -154,6 +154,12 @@ enum scx_ops_flags {
 	 */
 	SCX_OPS_ALLOW_QUEUED_WAKEUP	= 1LLU << 5,
 
+	/*
+	 * If set, enable per-node idle cpumasks. If clear, use a single global
+	 * flat idle cpumask.
+	 */
+	SCX_OPS_BUILTIN_IDLE_PER_NODE	= 1LLU << 6,
+
 	/*
 	 * CPU cgroup support flags
 	 */
@@ -165,6 +171,7 @@ enum scx_ops_flags {
 				  SCX_OPS_ENQ_MIGRATION_DISABLED |
 				  SCX_OPS_ALLOW_QUEUED_WAKEUP |
 				  SCX_OPS_SWITCH_PARTIAL |
+				  SCX_OPS_BUILTIN_IDLE_PER_NODE |
 				  SCX_OPS_HAS_CGROUP_WEIGHT,
 };
 
@@ -3427,7 +3434,7 @@ static void handle_hotplug(struct rq *rq, bool online)
 	atomic_long_inc(&scx_hotplug_seq);
 
 	if (scx_enabled())
-		scx_idle_update_selcpu_topology();
+		scx_idle_update_selcpu_topology(&scx_ops);
 
 	if (online && SCX_HAS_OP(cpu_online))
 		SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
@@ -5228,6 +5235,16 @@ static int validate_ops(const struct sched_ext_ops *ops)
 		return -EINVAL;
 	}
 
+	/*
+	 * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle
+	 * selection policy to be enabled.
+	 */
+	if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) &&
+	    (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) {
+		scx_ops_error("SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -5352,7 +5369,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 			static_branch_enable_cpuslocked(&scx_has_op[i]);
 
 	check_hotplug_seq(ops);
-	scx_idle_update_selcpu_topology();
+	scx_idle_update_selcpu_topology(ops);
 
 	cpus_read_unlock();
 
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index ed1804506585b..0912f94b95cdc 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -14,6 +14,9 @@
 /* Enable/disable built-in idle CPU selection policy */
 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
 
+/* Enable/disable per-node idle cpumasks */
+static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_CPUMASK_OFFSTACK
 #define CL_ALIGNED_IF_ONSTACK
@@ -204,7 +207,7 @@ static bool llc_numa_mismatch(void)
  * CPU belongs to a single LLC domain, and that each LLC domain is entirely
  * contained within a single NUMA node.
  */
-void scx_idle_update_selcpu_topology(void)
+void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
 {
 	bool enable_llc = false, enable_numa = false;
 	unsigned int nr_cpus;
@@ -237,13 +240,19 @@ void scx_idle_update_selcpu_topology(void)
 	 * If all CPUs belong to the same NUMA node and the same LLC domain,
 	 * enabling both NUMA and LLC optimizations is unnecessary, as checking
 	 * for an idle CPU in the same domain twice is redundant.
+	 *
+	 * If SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled ignore the NUMA
+	 * optimization, as we would naturally select idle CPUs within
+	 * specific NUMA nodes querying the corresponding per-node cpumask.
 	 */
-	nr_cpus = numa_weight(cpu);
-	if (nr_cpus > 0) {
-		if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
-			enable_numa = true;
-		pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
-			 cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
+	if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
+		nr_cpus = numa_weight(cpu);
+		if (nr_cpus > 0) {
+			if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
+				enable_numa = true;
+			pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
+				 cpumask_pr_args(numa_span(cpu)), nr_cpus);
+		}
 	}
 	rcu_read_unlock();
 
@@ -530,6 +539,11 @@ void scx_idle_enable(struct sched_ext_ops *ops)
 	}
 	static_branch_enable(&scx_builtin_idle_enabled);
 
+	if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)
+		static_branch_enable(&scx_builtin_idle_per_node);
+	else
+		static_branch_disable(&scx_builtin_idle_per_node);
+
 #ifdef CONFIG_SMP
 	/*
 	 * Consider all online cpus idle. Should converge to the actual state
@@ -543,6 +557,7 @@ void scx_idle_enable(struct sched_ext_ops *ops)
 void scx_idle_disable(void)
 {
 	static_branch_disable(&scx_builtin_idle_enabled);
+	static_branch_disable(&scx_builtin_idle_per_node);
 }
 
 /********************************************************************************
diff --git a/kernel/sched/ext_idle.h b/kernel/sched/ext_idle.h
index bbac0fd9a5ddd..339b6ec9c4cb7 100644
--- a/kernel/sched/ext_idle.h
+++ b/kernel/sched/ext_idle.h
@@ -13,12 +13,12 @@
 struct sched_ext_ops;
 
 #ifdef CONFIG_SMP
-void scx_idle_update_selcpu_topology(void);
+void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops);
 void scx_idle_init_masks(void);
 bool scx_idle_test_and_clear_cpu(int cpu);
 s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags);
 #else /* !CONFIG_SMP */
-static inline void scx_idle_update_selcpu_topology(void) {}
+static inline void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops) {}
 static inline void scx_idle_init_masks(void) {}
 static inline bool scx_idle_test_and_clear_cpu(int cpu) { return false; }
 static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
diff --git a/tools/sched_ext/include/scx/compat.h b/tools/sched_ext/include/scx/compat.h
index b50280e2ba2ba..d63cf40be8eee 100644
--- a/tools/sched_ext/include/scx/compat.h
+++ b/tools/sched_ext/include/scx/compat.h
@@ -109,6 +109,9 @@ static inline bool __COMPAT_struct_has_field(const char *type, const char *field
 #define SCX_OPS_SWITCH_PARTIAL \
 	__COMPAT_ENUM_OR_ZERO("scx_ops_flags", "SCX_OPS_SWITCH_PARTIAL")
 
+#define SCX_OPS_BUILTIN_IDLE_PER_NODE \
+	__COMPAT_ENUM_OR_ZERO("scx_ops_flags", "SCX_OPS_BUILTIN_IDLE_PER_NODE")
+
 static inline long scx_hotplug_seq(void)
 {
 	int fd;
```
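To make the validate_ops() rule above concrete: a scheduler that provides its own .update_idle callback disables the built-in idle tracking unless it also sets SCX_OPS_KEEP_BUILTIN_IDLE, and the per-node cpumasks only exist as part of that built-in tracking. A minimal sketch of the BPF side of such a scheduler (the "pernode" name and the empty callback body are illustrative, not part of this series):

```c
/* SPDX-License-Identifier: GPL-2.0 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

/* Hypothetical callback: custom idle-state bookkeeping would go here. */
void BPF_STRUCT_OPS(pernode_update_idle, s32 cpu, bool idle)
{
}

SEC(".struct_ops.link")
struct sched_ext_ops pernode_ops = {
	.update_idle	= (void *)pernode_update_idle,
	/*
	 * Implementing .update_idle normally disables the built-in idle
	 * tracking; SCX_OPS_KEEP_BUILTIN_IDLE retains it, which is what
	 * lets SCX_OPS_BUILTIN_IDLE_PER_NODE pass validate_ops().
	 */
	.flags		= SCX_OPS_KEEP_BUILTIN_IDLE |
			  SCX_OPS_BUILTIN_IDLE_PER_NODE,
	.name		= "pernode",
};
```

Dropping SCX_OPS_KEEP_BUILTIN_IDLE from that flags field while keeping .update_idle would hit the scx_ops_error() path added in validate_ops() and fail the load with -EINVAL.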
Add the new scheduler flag SCX_OPS_BUILTIN_IDLE_PER_NODE, which allows BPF schedulers to select between using a global flat idle cpumask or multiple per-node cpumasks.

This only introduces the flag and the mechanism to enable/disable this feature without affecting any scheduling behavior.

Cc: Yury Norov [NVIDIA] <yury.norov@gmail.com>
Signed-off-by: Andrea Righi <arighi@nvidia.com>

```
---
 kernel/sched/ext.c                   | 21 ++++++++++++++++++--
 kernel/sched/ext_idle.c              | 29 +++++++++++++++++++++-------
 kernel/sched/ext_idle.h              |  4 ++--
 tools/sched_ext/include/scx/compat.h |  3 +++
 4 files changed, 46 insertions(+), 11 deletions(-)
```
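On the userspace side, the new compat macro makes opting in safe across kernel versions: __COMPAT_ENUM_OR_ZERO() resolves the enum value from the running kernel's BTF and expands to 0 when the entry doesn't exist. A sketch of a loader following the pattern of the in-tree tools/sched_ext examples (the "pernode" skeleton name and its uei field are hypothetical):

```c
/* SPDX-License-Identifier: GPL-2.0 */
#include <scx/common.h>
#include "pernode.bpf.skel.h"

int main(int argc, char **argv)
{
	struct pernode *skel;
	struct bpf_link *link;

	skel = SCX_OPS_OPEN(pernode_ops, pernode);

	/*
	 * Expands to 0 on kernels whose scx_ops_flags enum lacks the
	 * flag, so the |= is a no-op there and the scheduler falls back
	 * to the single global idle cpumask.
	 */
	skel->struct_ops.pernode_ops->flags |= SCX_OPS_BUILTIN_IDLE_PER_NODE;

	SCX_OPS_LOAD(skel, pernode_ops, pernode, uei);
	link = SCX_OPS_ATTACH(skel, pernode_ops, pernode);

	/* A real loader would wait for scheduler exit before detaching. */
	bpf_link__destroy(link);
	pernode__destroy(skel);
	return 0;
}
```

Because the flag only selects between idle-cpumask layouts, the same binary behaves identically on pre-flag kernels apart from using the global mask.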