===================================================================
@@ -950,12 +950,67 @@ static DEFINE_MUTEX(hybrid_capacity_lock
*/
struct hybrid_domain {
struct hybrid_domain *next;
+	struct device *dev;
cpumask_t cpumask;
int scaling;
};
static struct hybrid_domain *hybrid_domains;
+static int hybrid_get_cost(struct device *dev, unsigned long freq_not_used,
+			   unsigned long *cost)
+{
+	struct pstate_data *pd = &all_cpu_data[dev->id]->pstate;
+
+	/*
+	 * The smaller the perf-to-frequency scaling factor, the larger the IPC
+	 * ratio between the CPUs in the given domain and the least capable CPU
+	 * in the system. Assume the cost to be proportional to that IPC ratio
+	 * (note that perf_ctl_scaling is the same for all CPUs in the system
+	 * and equal to the perf-to-frequency scaling factor of the least
+	 * capable CPU type).
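+	 *
+	 * For example (illustrative values): if perf_ctl_scaling is 100000
+	 * and the domain's scaling factor is 78741, the cost below evaluates
+	 * to 127, while a domain whose scaling factor equals perf_ctl_scaling
+	 * gets the baseline cost of 100.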
+	 */
+	*cost = div_u64(100ULL * pd->perf_ctl_scaling, pd->scaling);
+
+	return 0;
+}
+
+static bool hybrid_register_perf_domain(struct hybrid_domain *hd)
+{
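+	/*
+	 * Advanced EM data callbacks: only a cost callback is supplied here,
+	 * because the cost of a domain is derived directly from its scaling
+	 * factor in hybrid_get_cost().
+	 */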
+	static struct em_data_callback em_cb = EM_ADV_DATA_CB(NULL, hybrid_get_cost);
+
+	/*
+	 * Registering EM perf domains without enabling asymmetric CPU capacity
+	 * support is not useful, and a given domain must not be registered
+	 * more than once.
+	 */
+	if (!hybrid_max_perf_cpu || hd->dev)
+		return false;
+
+	hd->dev = get_cpu_device(cpumask_any(&hd->cpumask));
+	if (!hd->dev)
+		return false;
+
+	/*
+	 * Use one EM state in each domain to indicate that the cost associated
+	 * with a domain applies to all CPUs in it regardless of the frequency.
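+	 *
+	 * The last ("microwatts") argument of em_dev_register_perf_domain()
+	 * below is false, because hybrid_get_cost() returns costs in an
+	 * abstract scale rather than real power numbers.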
+	 */
+	if (em_dev_register_perf_domain(hd->dev, 1, &em_cb, &hd->cpumask, false)) {
+		hd->dev = NULL;
+		return false;
+	}
+
+	return true;
+}
+
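+/*
+ * Register EM perf domains for all hybrid domains created so far; called
+ * under hybrid_capacity_lock.
+ */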
+static void hybrid_register_all_perf_domains(void)
+{
+	struct hybrid_domain *hd;
+
+	for (hd = hybrid_domains; hd; hd = hd->next)
+		hybrid_register_perf_domain(hd);
+}
+
static void hybrid_add_to_domain(struct cpudata *cpudata)
{
int scaling = cpudata->pstate.scaling;
@@ -975,6 +1030,8 @@ static void hybrid_add_to_domain(struct
return;
cpumask_set_cpu(cpu, &hd->cpumask);
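+		/*
+		 * If a perf domain has already been registered for this hybrid
+		 * domain, extend it to cover the new CPU too.
+		 */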
+		if (hd->dev)
+			em_dev_expand_perf_domain(hd->dev, cpu);
pr_debug("CPU %d added to hybrid domain %*pbl\n", cpu,
cpumask_pr_args(&hd->cpumask));
@@ -991,11 +1048,14 @@ static void hybrid_add_to_domain(struct
hd->scaling = scaling;
hd->next = hybrid_domains;
hybrid_domains = hd;
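+	/*
+	 * If a perf domain has been registered for the new hybrid domain,
+	 * trigger a rebuild so EAS can start taking it into account.
+	 */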
+	if (hybrid_register_perf_domain(hd))
+		em_rebuild_perf_domains();
pr_debug("New hybrid domain %*pbl: scaling = %d\n",
cpumask_pr_args(&hd->cpumask), hd->scaling);
}
#else /* CONFIG_ENERGY_MODEL */
+static inline void hybrid_register_all_perf_domains(void) {}
static inline void hybrid_add_to_domain(struct cpudata *cpudata) {}
#endif /* !CONFIG_ENERGY_MODEL */
@@ -1093,6 +1153,11 @@ static void hybrid_refresh_cpu_capacity_
guard(mutex)(&hybrid_capacity_lock);
__hybrid_refresh_cpu_capacity_scaling();
+	/*
+	 * Perf domains are not registered before setting hybrid_max_perf_cpu,
+	 * so register them all after setting up CPU capacity scaling.
+	 */
+	hybrid_register_all_perf_domains();
}
static void hybrid_init_cpu_capacity_scaling(bool refresh)
@@ -1116,7 +1181,7 @@ static void hybrid_init_cpu_capacity_sca
hybrid_refresh_cpu_capacity_scaling();
/*
* Disabling ITMT causes sched domains to be rebuilt to disable asym
-	 * packing and enable asym capacity.
+	 * packing and enable asym capacity and EAS.
*/
sched_clear_itmt_support();
}
@@ -3467,6 +3532,8 @@ static ssize_t intel_pstate_show_status(
static int intel_pstate_update_status(const char *buf, size_t size)
{
+	int ret = -EINVAL;
+
if (size == 3 && !strncmp(buf, "off", size)) {
if (!intel_pstate_driver)
return -EINVAL;
@@ -3476,6 +3543,8 @@ static int intel_pstate_update_status(co
cpufreq_unregister_driver(intel_pstate_driver);
intel_pstate_driver_cleanup();
+		/* Trigger an EAS reconfiguration in case EAS was in use. */
+		rebuild_sched_domains_energy();
return 0;
}
@@ -3487,7 +3556,13 @@ static int intel_pstate_update_status(co
cpufreq_unregister_driver(intel_pstate_driver);
}
-		return intel_pstate_register_driver(&intel_pstate);
+		ret = intel_pstate_register_driver(&intel_pstate);
+		/*
+		 * If the previous status was "passive" and the schedutil
+		 * governor was in use, EAS was disabled when that governor
+		 * exited, so trigger a sched domains rebuild in case EAS
+		 * needs to be enabled again.
+		 */
+		rebuild_sched_domains_energy();
}
if (size == 7 && !strncmp(buf, "passive", size)) {
@@ -3499,10 +3574,10 @@ static int intel_pstate_update_status(co
intel_pstate_sysfs_hide_hwp_dynamic_boost();
}
-		return intel_pstate_register_driver(&intel_cpufreq);
+		ret = intel_pstate_register_driver(&intel_cpufreq);
}
-	return -EINVAL;
+	return ret;
}
static int no_load __initdata;