@@ -767,6 +767,7 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
update_rq_clock(rq);
sched_info_queued(rq, p);
p->sched_class->enqueue_task(rq, p, flags);
+ update_cpu_concurrency(rq);
}

static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -774,6 +775,7 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
update_rq_clock(rq);
sched_info_dequeued(rq, p);
p->sched_class->dequeue_task(rq, p, flags);
+ update_cpu_concurrency(rq);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -2428,6 +2430,7 @@ void scheduler_tick(void)
update_rq_clock(rq);
curr->sched_class->task_tick(rq, curr, 0);
update_cpu_load_active(rq);
+ update_cpu_concurrency(rq);
raw_spin_unlock(&rq->lock);

perf_event_task_tick();
@@ -2563,6 +2563,7 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
void idle_enter_fair(struct rq *this_rq)
{
update_rq_runnable_avg(this_rq, 1);
+ update_cpu_concurrency(this_rq);
}

/*
@@ -2573,6 +2574,7 @@ void idle_enter_fair(struct rq *this_rq)
void idle_exit_fair(struct rq *this_rq)
{
update_rq_runnable_avg(this_rq, 0);
+ update_cpu_concurrency(this_rq);
}

static int idle_balance(struct rq *this_rq);
CC can change only when a task is enqueued on or dequeued from the CPU's rq. We also update it on every scheduler tick and on idle entry/exit, because there may be no enqueue or dequeue for a long time. Therefore, we track CC at, and only at, these four points:

1. dequeue
2. enqueue
3. scheduler tick
4. idle enter and exit

TODO: use the existing load tracking framework

Signed-off-by: Yuyang Du <yuyang.du@intel.com>
---
 kernel/sched/core.c | 3 +++
 kernel/sched/fair.c | 2 ++
 2 files changed, 5 insertions(+)
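The hunks above add only the call sites; the body of update_cpu_concurrency() is introduced elsewhere in the series. As a rough illustration of the idea (CC as a decayed, time-weighted average of rq->nr_running), here is a minimal user-space model. The file name, struct fields, constants, and the first-order filter below are all assumptions made for this sketch, not the patch's actual implementation.

/*
 * cc_model.c - a standalone, user-space sketch of what an
 * update_cpu_concurrency() implementation might look like.
 * Everything here (field names, constants, the filter) is
 * illustrative only, not taken from the patch.
 *
 * Build: gcc -O2 -o cc_model cc_model.c && ./cc_model
 */
#include <stdio.h>
#include <stdint.h>

#define CC_SCALE	1024			/* fixed point: 1 task == 1024 */
#define CC_TAU		(32 * 1000000ull)	/* filter time constant: 32ms in ns */

struct rq_model {
	uint64_t clock;			/* rq clock in ns (update_rq_clock() analogue) */
	uint64_t last_cc_update;	/* timestamp of the previous CC update */
	uint64_t concurrency;		/* CC: avg nr_running, in CC_SCALE fixed point */
	unsigned int nr_running;
};

/*
 * Fold the time since the last update into CC with a first-order
 * low-pass filter: the longer nr_running holds a value, the closer
 * CC converges to it.  Called from the four hook points the patch
 * adds: enqueue, dequeue, scheduler tick, and idle enter/exit.
 */
static void update_cpu_concurrency(struct rq_model *rq)
{
	uint64_t delta = rq->clock - rq->last_cc_update;
	uint64_t target = (uint64_t)rq->nr_running * CC_SCALE;

	if (!delta)
		return;
	rq->last_cc_update = rq->clock;

	/* cc += (target - cc) * delta / (delta + tau); two branches to
	 * stay within unsigned arithmetic. */
	if (target >= rq->concurrency)
		rq->concurrency += (target - rq->concurrency) * delta / (delta + CC_TAU);
	else
		rq->concurrency -= (rq->concurrency - target) * delta / (delta + CC_TAU);
}

int main(void)
{
	struct rq_model rq = { 0 };
	int ms;

	/* Two tasks runnable for 100ms, ticked every 1ms... */
	rq.nr_running = 2;
	for (ms = 1; ms <= 100; ms++) {
		rq.clock = (uint64_t)ms * 1000000;
		update_cpu_concurrency(&rq);
	}
	printf("CC after 100ms at nr_running=2: %.2f tasks\n",
	       rq.concurrency / (double)CC_SCALE);

	/* ...then the rq goes idle for another 100ms. */
	rq.nr_running = 0;
	for (ms = 101; ms <= 200; ms++) {
		rq.clock = (uint64_t)ms * 1000000;
		update_cpu_concurrency(&rq);
	}
	printf("CC after 100ms idle: %.2f tasks\n",
	       rq.concurrency / (double)CC_SCALE);
	return 0;
}

A model like this also shows why the tick and idle hooks matter: if CC were updated only on enqueue/dequeue, a CPU that stays busy (or idle) with no queue changes would keep a stale CC value indefinitely.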