@@ -2789,7 +2789,8 @@ static long __update_entity_utilization_avg_contrib(struct sched_entity *se)
 		__update_task_entity_utilization(se);
 	else
 		se->avg.utilization_avg_contrib =
-				group_cfs_rq(se)->utilization_load_avg;
+				group_cfs_rq(se)->utilization_load_avg +
+				group_cfs_rq(se)->utilization_blocked_avg;
 
 	return se->avg.utilization_avg_contrib - old_contrib;
 }
@@ -5046,13 +5047,17 @@ static int select_idle_sibling(struct task_struct *p, int target)
 
 static int get_cpu_usage(int cpu)
 {
+	int sum;
 	unsigned long usage = cpu_rq(cpu)->cfs.utilization_load_avg;
+	unsigned long blocked = cpu_rq(cpu)->cfs.utilization_blocked_avg;
 	unsigned long capacity_orig = capacity_orig_of(cpu);
 
-	if (usage >= capacity_orig)
+	sum = usage + blocked;
+
+	if (sum >= capacity_orig)
 		return capacity_orig;
 
-	return usage;
+	return sum;
 }
 
 /*
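
For readers without the surrounding source handy, the first hunk leaves __update_entity_utilization_avg_contrib() reading roughly as below. The entity_is_task() branch and the old_contrib bookkeeping are inferred from the context lines of the hunk rather than quoted from the tree, so treat this as a sketch (comments added here, not part of the patch):

static long __update_entity_utilization_avg_contrib(struct sched_entity *se)
{
	long old_contrib = se->avg.utilization_avg_contrib;

	/*
	 * Tasks track their own utilization; a group entity now contributes
	 * its group cfs_rq's runnable plus blocked utilization, so tasks
	 * that recently went to sleep keep counting for a while.
	 */
	if (entity_is_task(se))
		__update_task_entity_utilization(se);
	else
		se->avg.utilization_avg_contrib =
				group_cfs_rq(se)->utilization_load_avg +
				group_cfs_rq(se)->utilization_blocked_avg;

	/* The caller uses the returned delta to adjust the entity's cfs_rq. */
	return se->avg.utilization_avg_contrib - old_contrib;
}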
Add the blocked utilization contribution to group sched_entity
utilization (se->avg.utilization_avg_contrib) and to get_cpu_usage().
With this change cpu usage now includes recent usage by currently
non-runnable tasks, hence it provides a more stable view of the cpu
usage. It does, however, also mean that the meaning of usage is
changed: A cpu may be momentarily idle while usage is >0. It can no
longer be assumed that cpu usage >0 implies runnable tasks on the rq.
cfs_rq->utilization_load_avg or nr_running should be used instead to
get the current rq status.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
---
 kernel/sched/fair.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
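
To illustrate the caveat about usage being >0 while a cpu is idle: after this patch get_cpu_usage() can return a non-zero value for a cpu with nothing runnable, so code that needs the instantaneous rq state should look at nr_running (or cfs_rq->utilization_load_avg) rather than at usage. A minimal, hypothetical sketch of such a check (the helper name is made up for illustration and is not part of this patch):

/* Hypothetical example only; not introduced by this patch. */
static inline bool cpu_has_runnable_cfs_tasks(int cpu)
{
	/*
	 * get_cpu_usage() now includes blocked utilization, so a non-zero
	 * return does not imply that anything is runnable right now.
	 * Check the rq state directly instead.
	 */
	return cpu_rq(cpu)->cfs.nr_running > 0;
}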