@@ -3237,22 +3237,23 @@ static int scx_ops_init_task(struct task
return 0;
}
-static void set_task_scx_weight(struct task_struct *p)
-{
- u32 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
-
- p->scx.weight = sched_weight_to_cgroup(weight);
-}
-
static void scx_ops_enable_task(struct task_struct *p)
{
+ u32 weight;
+
lockdep_assert_rq_held(task_rq(p));
/*
* Set the weight before calling ops.enable() so that the scheduler
* doesn't see a stale value if they inspect the task struct.
*/
- set_task_scx_weight(p);
+ if (task_has_idle_policy(p))
+ weight = WEIGHT_IDLEPRIO;
+ else
+ weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO];
+
+ p->scx.weight = sched_weight_to_cgroup(weight);
+
if (SCX_HAS_OP(enable))
SCX_CALL_OP_TASK(SCX_KF_REST, enable, p);
scx_set_task_state(p, SCX_TASK_ENABLED);
@@ -3405,9 +3406,11 @@ void sched_ext_free(struct task_struct *
static void reweight_task_scx(struct rq *rq, struct task_struct *p, int newprio)
{
+ unsigned long weight = sched_prio_to_weight[newprio];
+
lockdep_assert_rq_held(task_rq(p));
- set_task_scx_weight(p);
+ p->scx.weight = sched_weight_to_cgroup(weight);
if (SCX_HAS_OP(set_weight))
SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
}
When initializing p->scx.weight, scx_ops_enable_task() wasn't considering
whether the task is SCHED_IDLE. Update it to use WEIGHT_IDLEPRIO as the
source weight for SCHED_IDLE tasks.

This leaves reweight_task_scx() the sole user of set_task_scx_weight().
Open code it. @weight is going to be provided by sched core in the future
anyway.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: David Vernet <void@manifault.com>
Cc: Peter Zijlstra <peterz@infradead.org>
---
This is a related fix for sched_ext. Thanks.

 kernel/sched/ext.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)
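
For anyone skimming the change, below is a minimal userspace sketch of the
weight selection scx_ops_enable_task() performs after this patch. It is
illustrative only: WEIGHT_IDLEPRIO (3), MAX_RT_PRIO (100) and the nice-0
entry of sched_prio_to_weight[] (1024) follow the kernel's definitions,
but sched_weight_to_cgroup() is stubbed out as a pass-through because the
cgroup-scale conversion isn't the point of the fix, and pick_scx_weight()
is a made-up wrapper, not a kernel function.

/*
 * Userspace sketch of the post-patch weight selection. NOT kernel code;
 * the conversion helper is a placeholder.
 */
#include <stdbool.h>
#include <stdio.h>

#define WEIGHT_IDLEPRIO	3
#define MAX_RT_PRIO	100

/* Excerpt of the kernel's nice -20..19 table; index 20 is nice 0. */
static const int sched_prio_to_weight[40] = {
	[20] = 1024,	/* nice 0 */
};

/* Placeholder for the kernel helper; the real conversion lives in ext.c. */
static unsigned int sched_weight_to_cgroup(unsigned int weight)
{
	return weight;
}

static unsigned int pick_scx_weight(bool idle_policy, int static_prio)
{
	unsigned int weight;

	/* Mirrors the branch added to scx_ops_enable_task(). */
	if (idle_policy)
		weight = WEIGHT_IDLEPRIO;
	else
		weight = sched_prio_to_weight[static_prio - MAX_RT_PRIO];

	return sched_weight_to_cgroup(weight);
}

int main(void)
{
	/* static_prio 120 corresponds to nice 0. */
	printf("SCHED_IDLE -> %u\n", pick_scx_weight(true, 120));
	printf("nice 0     -> %u\n", pick_scx_weight(false, 120));
	return 0;
}

Running the sketch prints 3 for a SCHED_IDLE task versus 1024 for a nice-0
task (before any cgroup-scale conversion), which is the distinction a BPF
scheduler would have missed prior to this fix.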