@@ -7260,6 +7260,36 @@ static void run_rebalance_domains(struct softirq_action *h)
enum cpu_idle_type idle = this_rq->idle_balance ?
CPU_IDLE : CPU_NOT_IDLE;
+#ifdef CONFIG_WORKLOAD_CONSOLIDATION
+ struct cpumask *nonshielded = __get_cpu_var(local_cpu_mask);
+ int this_cpu = cpu_of(this_rq);
+
+ /*
+ * If we encounter shielded cpus here, don't do balance on them
+ */
+ cpumask_copy(nonshielded, cpu_active_mask);
+
+ rcu_read_lock();
+ workload_consolidation_nonshielded_mask(this_cpu, nonshielded);
+ rcu_read_unlock();
+
+ /*
+ * Aggressively unload the shielded cpus onto unshielded cpus
+ */
+ workload_consolidation_unload(nonshielded);
+
+ if (cpumask_test_cpu(this_cpu, nonshielded)) {
+ rebalance_domains(this_rq, idle);
+
+ /*
+ * If this cpu has a pending nohz_balance_kick, then do the
+ * balancing on behalf of the other idle cpus whose ticks are
+ * stopped.
+ */
+ cpumask_and(nonshielded, nonshielded, nohz.idle_cpus_mask);
+ nohz_idle_balance(this_rq, idle, nonshielded);
+ }
+#else
rebalance_domains(this_rq, idle);
/*
@@ -7267,7 +7297,8 @@ static void run_rebalance_domains(struct softirq_action *h)
* balancing on behalf of the other idle cpus whose ticks are
* stopped.
*/
- nohz_idle_balance(this_rq, idle);
+ nohz_idle_balance(this_rq, idle, nohz.idle_cpus_mask);
+#endif
}
/*
Signed-off-by: Yuyang Du <yuyang.du@intel.com>
---
 kernel/sched/fair.c | 33 ++++++++++++++++++++++++++++++++-
 1 file changed, 32 insertions(+), 1 deletion(-)