===================================================================
@@ -179,7 +179,7 @@ struct vid_data {
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
- * @turbo_disabled_s: Saved @turbo_disabled value.
+ * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
* @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
* P-state capacity.
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
@@ -188,7 +188,7 @@ struct vid_data {
struct global_params {
bool no_turbo;
bool turbo_disabled;
- bool turbo_disabled_s;
+ bool turbo_disabled_mf;
int max_perf_pct;
int min_perf_pct;
};
@@ -896,6 +896,28 @@ static void intel_pstate_update_policies
cpufreq_update_policy(cpu);
}
 
+static void intel_pstate_update_max_freq(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+ struct cpufreq_policy new_policy;
+ struct cpudata *cpudata;
+
+ if (!policy)
+ return;
+
+ cpudata = all_cpu_data[cpu];
+ policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+ cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
+ memcpy(&new_policy, policy, sizeof(*policy));
+ new_policy.max = min(policy->user_policy.max, policy->cpuinfo.max_freq);
+ new_policy.min = min(policy->user_policy.min, new_policy.max);
+
+ cpufreq_set_policy(policy, &new_policy);
+
+ cpufreq_cpu_release(policy);
+}
+
static void intel_pstate_update_limits(unsigned int cpu)
{
mutex_lock(&intel_pstate_driver_lock);
@@ -905,9 +927,10 @@ static void intel_pstate_update_limits(u
* If turbo has been turned on or off globally, policy limits for
* all CPUs need to be updated to reflect that.
*/
- if (global.turbo_disabled_s != global.turbo_disabled) {
- global.turbo_disabled_s = global.turbo_disabled;
- intel_pstate_update_policies();
+ if (global.turbo_disabled_mf != global.turbo_disabled) {
+ global.turbo_disabled_mf = global.turbo_disabled;
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_max_freq(cpu);
} else {
cpufreq_update_policy(cpu);
}
@@ -2156,7 +2179,7 @@ static int __intel_pstate_cpu_init(struc
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
update_turbo_state();
- global.turbo_disabled_s = global.turbo_disabled;
+ global.turbo_disabled_mf = global.turbo_disabled;
policy->cpuinfo.max_freq = global.turbo_disabled ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
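
For illustration, the P-state-to-kHz conversion in __intel_pstate_cpu_init() above and the limit clamping done by the new intel_pstate_update_max_freq() can be checked with a small standalone sketch; the P-state numbers, the 100000 kHz-per-step scaling factor and the user limits below are made-up placeholders, not values taken from this patch.

/* Standalone sketch with hypothetical values; not part of the patch itself. */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int scaling = 100000;		/* kHz per P-state step (assumed) */
	unsigned int max_pstate = 24;		/* highest non-turbo P-state (made up) */
	unsigned int turbo_pstate = 36;		/* highest turbo P-state (made up) */
	unsigned int user_max = 3600000;	/* user_policy.max in kHz (made up) */
	unsigned int user_min = 2800000;	/* user_policy.min in kHz (made up) */
	int turbo_disabled_mf = 1;		/* turbo has just been disabled globally */

	/* cpuinfo.max_freq selection and scaling, as in the hunks above */
	unsigned int cpuinfo_max = (turbo_disabled_mf ? max_pstate : turbo_pstate) * scaling;

	/* new_policy.max/min clamping, as in intel_pstate_update_max_freq() */
	unsigned int new_max = MIN(user_max, cpuinfo_max);
	unsigned int new_min = MIN(user_min, new_max);

	printf("cpuinfo.max_freq=%u new.max=%u new.min=%u\n",
	       cpuinfo_max, new_max, new_min);
	/* prints: cpuinfo.max_freq=2400000 new.max=2400000 new.min=2400000 */
	return 0;
}
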
===================================================================
@@ -34,11 +34,6 @@
 
static LIST_HEAD(cpufreq_policy_list);
 
-static inline bool policy_is_inactive(struct cpufreq_policy *policy)
-{
- return cpumask_empty(policy->cpus);
-}
-
/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active) \
list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
@@ -260,7 +255,7 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
* cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
* @policy: cpufreq policy returned by cpufreq_cpu_acquire().
*/
-static void cpufreq_cpu_release(struct cpufreq_policy *policy)
+void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
if (WARN_ON(!policy))
return;
@@ -284,7 +279,7 @@ static void cpufreq_cpu_release(struct c
* cpufreq_cpu_release() in order to release its rwsem and balance its usage
* counter properly.
*/
-static struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 
@@ -720,9 +715,6 @@ static ssize_t show_scaling_cur_freq(str
return ret;
}
 
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy);
-
/**
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
*/
@@ -2280,8 +2272,8 @@ EXPORT_SYMBOL(cpufreq_get_policy);
*
* The cpuinfo part of @policy is not updated by this function.
*/
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy)
+int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy)
{
struct cpufreq_governor *old_gov;
int ret;
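
Since cpufreq_cpu_acquire(), cpufreq_cpu_release() and cpufreq_set_policy() are no longer static, callers outside cpufreq.c are expected to follow the same pattern intel_pstate uses above. A minimal sketch of that calling convention, assuming a built-in driver; the helper name and the cap value are hypothetical and not part of this patch.

#include <linux/cpufreq.h>
#include <linux/kernel.h>
#include <linux/string.h>

/*
 * Hypothetical helper, for illustration only.  cpufreq_cpu_acquire() returns
 * NULL when the CPU has no policy or the policy is inactive; on success the
 * policy is locked and referenced, so every exit path after this point must
 * go through cpufreq_cpu_release().
 */
static void example_cap_cpu(unsigned int cpu, unsigned int cap_khz)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
	struct cpufreq_policy new_policy;

	if (!policy)
		return;

	/* Work on a copy; cpufreq_set_policy() takes the new limits separately. */
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.max = min(new_policy.max, cap_khz);
	new_policy.min = min(new_policy.min, new_policy.max);

	cpufreq_set_policy(policy, &new_policy);

	cpufreq_cpu_release(policy);
}

The NULL return doubles as the offline-CPU guard, which is what lets intel_pstate_update_limits() above walk for_each_possible_cpu() without checking CPU state first.
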
===================================================================
@@ -178,6 +178,11 @@ static inline struct cpufreq_policy *cpu
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif
 
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+ return cpumask_empty(policy->cpus);
+}
+
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
return cpumask_weight(policy->cpus) > 1;
@@ -193,7 +198,12 @@ unsigned int cpufreq_quick_get_max(unsig
void disable_cpufreq(void);
 
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
+
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
+void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
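
With policy_is_inactive() moved into the header, code outside cpufreq.c can make the same inactive-policy check that cpufreq_cpu_acquire() relies on before handing out a policy. A hypothetical guard, not part of this patch, just to show the intended use:

#include <linux/cpufreq.h>
#include <linux/types.h>

/* Hypothetical helper, for illustration only. */
static bool example_policy_is_usable(struct cpufreq_policy *policy)
{
	/* policy->cpus holds only online CPUs, so an empty mask means inactive */
	return policy && !policy_is_inactive(policy);
}
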