@@ -4365,9 +4365,16 @@ static int select_idle_sibling(struct task_struct *p, int target)
struct sched_domain *sd;
struct sched_group *sg;
int i = task_cpu(p);

+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+ int ret;
+ ret = workload_consolidation_wakeup(i, target);
+ if (ret < nr_cpu_ids)
+ return ret;
+#else
if (idle_cpu(target))
return target;
+#endif

/*
 * If the previous cpu is cache affine and idle, don't be stupid.
@@ -4460,7 +4467,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
}

while (sd) {
- struct sched_group *group;
+ struct sched_group *group = NULL;
int weight;

if (!(sd->flags & sd_flag)) {
@@ -4468,6 +4475,12 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
continue;
}

+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+ if (sd->flags & SD_ASYM_CONCURRENCY)
+ group = workload_consolidation_find_group(sd, p, cpu);
+
+ if (!group)
+#endif
group = find_idlest_group(sd, p, cpu, sd_flag);
if (!group) {
sd = sd->child;
Signed-off-by: Yuyang Du <yuyang.du@intel.com>
---
 kernel/sched/fair.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
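
Note for readers: the body of workload_consolidation_wakeup() is not shown in this patch; the call site in select_idle_sibling() only fixes its contract. It must return a CPU number below nr_cpu_ids for the caller to use directly, or any value >= nr_cpu_ids to fall through to the rest of the idle-sibling search. A minimal sketch honoring that contract follows; the internals are hypothetical and stand in for the real consolidation policy defined elsewhere in this series.

static int workload_consolidation_wakeup(int prev, int target)
{
	/*
	 * Hypothetical sketch only: return the first idle candidate,
	 * preferring the wakeup target, then the task's previous CPU
	 * for cache warmth.
	 */
	if (idle_cpu(target))
		return target;

	if (idle_cpu(prev))
		return prev;

	/* No suitable CPU: >= nr_cpu_ids tells the caller to fall back. */
	return nr_cpu_ids;
}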
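
The select_task_rq_fair() hunk follows the same pattern: on domains flagged SD_ASYM_CONCURRENCY, workload_consolidation_find_group() gets first pick, and a NULL return (guaranteed by the new group = NULL initializer whenever the flag is clear) drops through to the stock find_idlest_group(). Below is a sketch with the signature implied by the call site; the group-selection policy shown is purely illustrative, using the scheduler-internal helpers of that era (sched_group_cpus(), tsk_cpus_allowed()).

static struct sched_group *
workload_consolidation_find_group(struct sched_domain *sd,
				  struct task_struct *p, int cpu)
{
	struct sched_group *sg = sd->groups;

	/*
	 * Hypothetical policy: walk the domain's circular group list
	 * and return the first group the task is allowed to run on.
	 * NULL means "no opinion", letting find_idlest_group() decide.
	 */
	do {
		if (cpumask_intersects(sched_group_cpus(sg),
				       tsk_cpus_allowed(p)))
			return sg;
		sg = sg->next;
	} while (sg != sd->groups);

	return NULL;
}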