===================================================================
@@ -118,6 +118,178 @@
mutex_unlock(&userspace_mutex);
}
+static int cpufreq_is_userspace_governor(int cpu)
+{
+ int ret;
+
+ mutex_lock(&userspace_mutex);
+ ret = per_cpu(cpu_is_managed, cpu);
+ mutex_unlock(&userspace_mutex);
+
+ return ret;
+}
+
+int cpufreq_userspace_freq_up(int cpu)
+{
+ unsigned int curfreq, nextminfreq;
+ int ret = 0;
+ struct cpufreq_frequency_table *pos, *table;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ if (!policy)
+ return -EINVAL;
+
+ if (!cpufreq_is_userspace_governor(cpu)) {
+ cpufreq_cpu_put(policy);
+ return -EINVAL;
+ }
+
+ mutex_lock(&userspace_mutex);
+ table = policy->freq_table;
+ if (!table) {
+ mutex_unlock(&userspace_mutex);
+ cpufreq_cpu_put(policy);
+ return -ENODEV;
+ }
+ nextminfreq = cpufreq_quick_get_max(cpu);
+ curfreq = policy->cur;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ if (pos->frequency > curfreq &&
+ pos->frequency < nextminfreq)
+ nextminfreq = pos->frequency;
+ }
+
+ if (nextminfreq != curfreq) {
+ unsigned int *setspeed = policy->governor_data;
+
+ *setspeed = nextminfreq;
+ ret = __cpufreq_driver_target(policy, nextminfreq,
+ CPUFREQ_RELATION_L);
+ } else
+ ret = 1;
+ mutex_unlock(&userspace_mutex);
+ cpufreq_cpu_put(policy);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_userspace_freq_up);
+
+int cpufreq_userspace_freq_down(int cpu)
+{
+ unsigned int curfreq, prevmaxfreq;
+ int ret = 0;
+ struct cpufreq_frequency_table *pos, *table;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ if (!policy)
+ return -EINVAL;
+
+ if (!cpufreq_is_userspace_governor(cpu)) {
+ cpufreq_cpu_put(policy);
+ return -EINVAL;
+ }
+
+ mutex_lock(&userspace_mutex);
+ table = policy->freq_table;
+ if (!table) {
+ mutex_unlock(&userspace_mutex);
+ cpufreq_cpu_put(policy);
+ return -ENODEV;
+ }
+ prevmaxfreq = policy->min;
+ curfreq = policy->cur;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ if (pos->frequency < curfreq &&
+ pos->frequency > prevmaxfreq)
+ prevmaxfreq = pos->frequency;
+ }
+
+ if (prevmaxfreq != curfreq) {
+ unsigned int *setspeed = policy->governor_data;
+
+ *setspeed = prevmaxfreq;
+ ret = __cpufreq_driver_target(policy, prevmaxfreq,
+ CPUFREQ_RELATION_L);
+ } else
+ ret = 1;
+ mutex_unlock(&userspace_mutex);
+ cpufreq_cpu_put(policy);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_userspace_freq_down);
+
+int cpufreq_userspace_freq_max(int cpu)
+{
+ unsigned int maxfreq;
+ int ret = 0;
+ struct cpufreq_frequency_table *table;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int *setspeed;
+
+ if (!policy)
+ return -EINVAL;
+
+ if (!cpufreq_is_userspace_governor(cpu)) {
+ cpufreq_cpu_put(policy);
+ return -EINVAL;
+ }
+
+ mutex_lock(&userspace_mutex);
+ table = policy->freq_table;
+ if (!table) {
+ mutex_unlock(&userspace_mutex);
+ cpufreq_cpu_put(policy);
+ return -ENODEV;
+ }
+ maxfreq = cpufreq_quick_get_max(cpu);
+
+ setspeed = policy->governor_data;
+ *setspeed = maxfreq;
+ ret = __cpufreq_driver_target(policy, maxfreq, CPUFREQ_RELATION_L);
+ mutex_unlock(&userspace_mutex);
+ cpufreq_cpu_put(policy);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_userspace_freq_max);
+
+int cpufreq_userspace_freq_min(int cpu)
+{
+ unsigned int minfreq;
+ int ret = 0;
+ struct cpufreq_frequency_table *table;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int *setspeed;
+
+ if (!policy)
+ return -EINVAL;
+
+ if (!cpufreq_is_userspace_governor(cpu)) {
+ cpufreq_cpu_put(policy);
+ return -EINVAL;
+ }
+ minfreq = policy->min;
+
+ mutex_lock(&userspace_mutex);
+ table = policy->freq_table;
+ if (!table) {
+ mutex_unlock(&userspace_mutex);
+ cpufreq_cpu_put(policy);
+ return -ENODEV;
+ }
+ setspeed = policy->governor_data;
+ *setspeed = minfreq;
+ ret = __cpufreq_driver_target(policy, minfreq, CPUFREQ_RELATION_L);
+ mutex_unlock(&userspace_mutex);
+ cpufreq_cpu_put(policy);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_userspace_freq_min);
+
static struct cpufreq_governor cpufreq_gov_userspace = {
.name = "userspace",
.init = cpufreq_userspace_policy_init,
===================================================================
@@ -890,4 +890,11 @@
int cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
+#ifdef CONFIG_CPU_FREQ
+int cpufreq_userspace_freq_down(int cpu);
+int cpufreq_userspace_freq_up(int cpu);
+int cpufreq_userspace_freq_max(int cpu);
+int cpufreq_userspace_freq_min(int cpu);
+#else
+#endif
#endif /* _LINUX_CPUFREQ_H */
Implement functions in the cpufreq userspace governor code to:

* Change the current frequency to the {max,min,up,down} frequencies,
  up/down being relative to the current one.

These will be used to implement KVM hypercalls for the guest to issue
frequency changes.

The current situation with DPDK and frequency changes is as follows: an
algorithm in the guest decides when to increase/decrease the frequency
based on the queue length of the device. On the host, a power manager
daemon is used to listen for frequency change requests (on another
core) and to issue these requests.

However, frequency changes are performance sensitive events: on a
change from a low load condition to a max load condition, the frequency
should be raised as soon as possible. Sending a virtio-serial
notification to another pCPU, then waiting for that pCPU to initiate an
IPI to the requestor pCPU to change the frequency, is slower and more
cache costly than a direct hypercall to the host to switch the
frequency. Moreover, if the pCPU where the power manager daemon is
running is not busy spinning on requests from the isolated DPDK vcpus,
there is also the cost of the HLT wakeup for that pCPU.

Instructions to set up: disable the intel_pstate driver
(intel_pstate=disable host kernel command line option) and set the
cpufreq userspace governor for the isolated pCPU.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
---
 drivers/cpufreq/cpufreq_userspace.c | 172 ++++++++++++++++++++++++++++++++++++
 include/linux/cpufreq.h             |   7 +
 2 files changed, 179 insertions(+)
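For the isolated pCPU, the userspace governor can be selected by writing
"userspace" to /sys/devices/system/cpu/cpuN/cpufreq/scaling_governor
(N being the isolated CPU), after booting the host with
intel_pstate=disable.

The sketch below illustrates, purely for discussion, how a host-side KVM
hypercall handler might forward guest frequency-change requests to the
exports added by this patch. The hypercall number (KVM_HC_CPU_FREQ), the
sub-command values and handle_cpu_freq_hypercall() are assumptions
invented for the sketch; the actual hypercall interface is not part of
this patch.

#include <linux/cpufreq.h>
#include <linux/errno.h>

/* Hypothetical hypercall number and sub-commands -- not an existing ABI. */
#define KVM_HC_CPU_FREQ	20

enum {
	CPU_FREQ_UP,	/* step to the next higher frequency */
	CPU_FREQ_DOWN,	/* step to the next lower frequency */
	CPU_FREQ_MAX,	/* jump to the maximum frequency */
	CPU_FREQ_MIN,	/* drop to the minimum frequency */
};

/*
 * Would be called from the hypercall dispatch path with the pCPU the
 * requesting vCPU is pinned to and the requested operation; the return
 * value of the corresponding export is propagated to the caller.
 */
static int handle_cpu_freq_hypercall(int pcpu, unsigned long op)
{
	switch (op) {
	case CPU_FREQ_UP:
		return cpufreq_userspace_freq_up(pcpu);
	case CPU_FREQ_DOWN:
		return cpufreq_userspace_freq_down(pcpu);
	case CPU_FREQ_MAX:
		return cpufreq_userspace_freq_max(pcpu);
	case CPU_FREQ_MIN:
		return cpufreq_userspace_freq_min(pcpu);
	default:
		return -EINVAL;
	}
}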