Message ID | 1eb27e3bbcbb2c67e6eadc0893c9b41e5d76894b.1553057341.git.viresh.kumar@linaro.org (mailing list archive)
---|---
State | New, archived
Series | [V3] cpufreq: Call transition notifier only once for each policy
On Wed, Mar 20, 2019 at 10:22:23AM +0530, Viresh Kumar wrote:
> diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
> index 3fae23834069..b2fe665878f7 100644
> --- a/arch/x86/kernel/tsc.c
> +++ b/arch/x86/kernel/tsc.c
> @@ -958,10 +958,15 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
>  	struct cpufreq_freqs *freq = data;
>  	unsigned long *lpj;
>
> +	if (WARN_ON_ONCE(cpumask_weight(freq->policy->related_cpus) != 1)) {
> +		mark_tsc_unstable("cpufreq changes: related CPUs affected");

I suspect this is a big fat nop, but it won't hurt.

> +		return 0;
> +	}
> +
>  	lpj = &boot_cpu_data.loops_per_jiffy;
>  #ifdef CONFIG_SMP
>  	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
> -		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
> +		lpj = &cpu_data(freq->policy->cpu).loops_per_jiffy;
>  #endif
>
>  	if (!ref_freq) {
> @@ -977,7 +982,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
>  		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
>  			mark_tsc_unstable("cpufreq changes");
>
> -		set_cyc2ns_scale(tsc_khz, freq->cpu, rdtsc());
> +		set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
>  	}
>
>  	return 0;

Just wondering, since we say x86 cpufreq handlers will only have a single
CPU here,

> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 65e4559eef2f..1ac8c710cccc 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -6649,10 +6649,8 @@ static void kvm_hyperv_tsc_notifier(void)
>  }
>  #endif
>
> -static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
> -				     void *data)
> +static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
>  {
> -	struct cpufreq_freqs *freq = data;
>  	struct kvm *kvm;
>  	struct kvm_vcpu *vcpu;
>  	int i, send_ipi = 0;
> @@ -6696,17 +6694,12 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
>  	 *
>  	 */
>
> -	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
> -		return 0;
> -	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
> -		return 0;
> -
> -	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
> +	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
>
>  	spin_lock(&kvm_lock);
>  	list_for_each_entry(kvm, &vm_list, vm_list) {
>  		kvm_for_each_vcpu(i, vcpu, kvm) {
> -			if (vcpu->cpu != freq->cpu)
> +			if (vcpu->cpu != cpu)
>  				continue;
>  			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
>  			if (vcpu->cpu != smp_processor_id())
> @@ -6728,8 +6721,24 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
>  	 * guest context is entered kvmclock will be updated,
>  	 * so the guest will not see stale values.
>  	 */
> -	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
> +	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
>  	}
> +}
> +
> +static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
> +				     void *data)
> +{
> +	struct cpufreq_freqs *freq = data;
> +	int cpu;
> +
> +	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
> +		return 0;
> +	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
> +		return 0;
> +
> +	for_each_cpu(cpu, freq->policy->cpus)
> +		__kvmclock_cpufreq_notifier(freq, cpu);
> +
>  	return 0;
>  }

Then why do we pretend otherwise here?
On Wed, 20 Mar 2019, Viresh Kumar wrote:

> Currently we call these notifiers once for each CPU of the policy->cpus

Nitpick: We call nothing. The notifiers are called ....

> cpumask. It would be more optimal if the notifier can be called only
> once and all the relevant information be provided to it. Out of the 23

> diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
> index 3fae23834069..b2fe665878f7 100644
> --- a/arch/x86/kernel/tsc.c
> +++ b/arch/x86/kernel/tsc.c
> @@ -958,10 +958,15 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
>  	struct cpufreq_freqs *freq = data;
>  	unsigned long *lpj;
>
> +	if (WARN_ON_ONCE(cpumask_weight(freq->policy->related_cpus) != 1)) {
> +		mark_tsc_unstable("cpufreq changes: related CPUs affected");
> +		return 0;
> +	}

You might add a check which ensures that policy->cpu == smp_processor_id()
because if this is not the case ....

Thanks,

	tglx
On 21-03-19, 12:45, Peter Zijlstra wrote:
> On Wed, Mar 20, 2019 at 10:22:23AM +0530, Viresh Kumar wrote:
> > diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
> > index 3fae23834069..b2fe665878f7 100644
> > --- a/arch/x86/kernel/tsc.c
> > +++ b/arch/x86/kernel/tsc.c
> > @@ -958,10 +958,15 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
> >  	struct cpufreq_freqs *freq = data;
> >  	unsigned long *lpj;
> >
> > +	if (WARN_ON_ONCE(cpumask_weight(freq->policy->related_cpus) != 1)) {
> > +		mark_tsc_unstable("cpufreq changes: related CPUs affected");
>
> I suspect this is a big fat nop, but it won't hurt.
>
> > +		return 0;
> > +	}
> > +
> >  	lpj = &boot_cpu_data.loops_per_jiffy;
> >  #ifdef CONFIG_SMP
> >  	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
> > -		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
> > +		lpj = &cpu_data(freq->policy->cpu).loops_per_jiffy;
> >  #endif
> >
> >  	if (!ref_freq) {
> > @@ -977,7 +982,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
> >  		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
> >  			mark_tsc_unstable("cpufreq changes");
> >
> > -		set_cyc2ns_scale(tsc_khz, freq->cpu, rdtsc());
> > +		set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
> >  	}
> >
> >  	return 0;
>
> Just wondering, since we say x86 cpufreq handlers will only have a
> single CPU here,
>
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 65e4559eef2f..1ac8c710cccc 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -6649,10 +6649,8 @@ static void kvm_hyperv_tsc_notifier(void)
> >  }
> >  #endif
> >
> > -static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
> > -				     void *data)
> > +static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
> >  {
> > -	struct cpufreq_freqs *freq = data;
> >  	struct kvm *kvm;
> >  	struct kvm_vcpu *vcpu;
> >  	int i, send_ipi = 0;
> > @@ -6696,17 +6694,12 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
> >  	 *
> >  	 */
> >
> > -	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
> > -		return 0;
> > -	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
> > -		return 0;
> > -
> > -	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
> > +	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
> >
> >  	spin_lock(&kvm_lock);
> >  	list_for_each_entry(kvm, &vm_list, vm_list) {
> >  		kvm_for_each_vcpu(i, vcpu, kvm) {
> > -			if (vcpu->cpu != freq->cpu)
> > +			if (vcpu->cpu != cpu)
> >  				continue;
> >  			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
> >  			if (vcpu->cpu != smp_processor_id())
> > @@ -6728,8 +6721,24 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
> >  	 * guest context is entered kvmclock will be updated,
> >  	 * so the guest will not see stale values.
> >  	 */
> > -	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
> > +	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
> >  	}
> > +}
> > +
> > +static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
> > +				     void *data)
> > +{
> > +	struct cpufreq_freqs *freq = data;
> > +	int cpu;
> > +
> > +	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
> > +		return 0;
> > +	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
> > +		return 0;
> > +
> > +	for_each_cpu(cpu, freq->policy->cpus)
> > +		__kvmclock_cpufreq_notifier(freq, cpu);
> > +
> >  	return 0;
> >  }
>
> Then why do we pretend otherwise here?
My intention was to not add any bug here because of my lack of knowledge
of the architecture in question, and so I tried to be safe.

If you guys think the behavior should be the same here as for the TSC, then
we can add similar checks here.
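For illustration, a minimal sketch (not part of the posted patch) of what such a "similar check" might look like in the kvmclock notifier, assuming the same single-CPU-per-policy restriction is acceptable there; the names and structure follow the refactoring proposed above:

	/*
	 * Hypothetical variant: bail out loudly if the policy spans more
	 * than one CPU, mirroring the WARN_ON_ONCE() added to
	 * time_cpufreq_notifier() in arch/x86/kernel/tsc.c.
	 */
	static int kvmclock_cpufreq_notifier(struct notifier_block *nb,
					     unsigned long val, void *data)
	{
		struct cpufreq_freqs *freq = data;

		if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
			return 0;
		if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
			return 0;

		/* x86 cpufreq policies are expected to contain exactly one CPU. */
		if (WARN_ON_ONCE(cpumask_weight(freq->policy->related_cpus) != 1))
			return 0;

		__kvmclock_cpufreq_notifier(freq, freq->policy->cpu);
		return 0;
	}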
On 21-03-19, 16:49, Thomas Gleixner wrote:
> On Wed, 20 Mar 2019, Viresh Kumar wrote:
> >
> > diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
> > index 3fae23834069..b2fe665878f7 100644
> > --- a/arch/x86/kernel/tsc.c
> > +++ b/arch/x86/kernel/tsc.c
> > @@ -958,10 +958,15 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
> >  	struct cpufreq_freqs *freq = data;
> >  	unsigned long *lpj;
> >
> > +	if (WARN_ON_ONCE(cpumask_weight(freq->policy->related_cpus) != 1)) {
> > +		mark_tsc_unstable("cpufreq changes: related CPUs affected");
> > +		return 0;
> > +	}
>
> You might add a check which ensures that policy->cpu == smp_processor_id()
> because if this is not the case ....

How about something like this?

	if (WARN_ON_ONCE(cpumask_weight(freq->policy->related_cpus) != 1 ||
			 freq->policy->cpu != smp_processor_id())) {
		mark_tsc_unstable("cpufreq changes: related CPUs affected");
		return 0;
	}

Thanks for your feedback.
On Fri, Mar 22, 2019 at 7:28 AM Viresh Kumar <viresh.kumar@linaro.org> wrote:
>
> On 21-03-19, 16:49, Thomas Gleixner wrote:
> > On Wed, 20 Mar 2019, Viresh Kumar wrote:
> > >
> > > diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
> > > index 3fae23834069..b2fe665878f7 100644
> > > --- a/arch/x86/kernel/tsc.c
> > > +++ b/arch/x86/kernel/tsc.c
> > > @@ -958,10 +958,15 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
> > >  	struct cpufreq_freqs *freq = data;
> > >  	unsigned long *lpj;
> > >
> > > +	if (WARN_ON_ONCE(cpumask_weight(freq->policy->related_cpus) != 1)) {
> > > +		mark_tsc_unstable("cpufreq changes: related CPUs affected");
> > > +		return 0;
> > > +	}
> >
> > You might add a check which ensures that policy->cpu == smp_processor_id()
> > because if this is not the case ....
>
> How about something like this?
>
> 	if (WARN_ON_ONCE(cpumask_weight(freq->policy->related_cpus) != 1 ||
> 			 freq->policy->cpu != smp_processor_id())) {
> 		mark_tsc_unstable("cpufreq changes: related CPUs affected");
> 		return 0;
> 	}
>
> Thanks for your feedback.

Peter suggested something like this IIRC.

Anyway, I'm still concerned that this approach in general fundamentally
doesn't work on SMP with frequency synchronization, which is the case for
the platforms affected by the problem it attempts to overcome.

The frequency has just been changed on one CPU, presumably to the requested
value (so this cannot work when turbo is enabled anyway), but then it also
has changed for all of the other CPUs in the system (or at least in the
package), so it is not sufficient to update the single CPU here as it is
only a messenger, so to speak.  However, updating the other CPUs from here
would be fundamentally racy AFAICS.
On 22-03-19, 11:49, Viresh Kumar wrote:
> On 21-03-19, 12:45, Peter Zijlstra wrote:
> > On Wed, Mar 20, 2019 at 10:22:23AM +0530, Viresh Kumar wrote:
> > > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > > index 65e4559eef2f..1ac8c710cccc 100644
> > > --- a/arch/x86/kvm/x86.c
> > > +++ b/arch/x86/kvm/x86.c
> > > @@ -6649,10 +6649,8 @@ static void kvm_hyperv_tsc_notifier(void)
> > >  }
> > >  #endif
> > >
> > > -static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
> > > -				     void *data)
> > > +static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
> > >  {
> > > -	struct cpufreq_freqs *freq = data;
> > >  	struct kvm *kvm;
> > >  	struct kvm_vcpu *vcpu;
> > >  	int i, send_ipi = 0;
> > > @@ -6696,17 +6694,12 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
> > >  	 *
> > >  	 */
> > >
> > > -	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
> > > -		return 0;
> > > -	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
> > > -		return 0;
> > > -
> > > -	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
> > > +	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
> > >
> > >  	spin_lock(&kvm_lock);
> > >  	list_for_each_entry(kvm, &vm_list, vm_list) {
> > >  		kvm_for_each_vcpu(i, vcpu, kvm) {
> > > -			if (vcpu->cpu != freq->cpu)
> > > +			if (vcpu->cpu != cpu)
> > >  				continue;
> > >  			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
> > >  			if (vcpu->cpu != smp_processor_id())
> > > @@ -6728,8 +6721,24 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
> > >  	 * guest context is entered kvmclock will be updated,
> > >  	 * so the guest will not see stale values.
> > >  	 */
> > > -	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
> > > +	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
> > >  	}
> > > +}
> > > +
> > > +static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
> > > +				     void *data)
> > > +{
> > > +	struct cpufreq_freqs *freq = data;
> > > +	int cpu;
> > > +
> > > +	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
> > > +		return 0;
> > > +	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
> > > +		return 0;
> > > +
> > > +	for_each_cpu(cpu, freq->policy->cpus)
> > > +		__kvmclock_cpufreq_notifier(freq, cpu);
> > > +
> > > 	return 0;
> > >  }
> >
> > Then why do we pretend otherwise here?
>
> My intention was to not add any bug here because of my lack of knowledge
> of the architecture in question, and so I tried to be safe.
>
> If you guys think the behavior should be the same here as for the TSC,
> then we can add similar checks here.

I am rebasing this patch over Rafael's patch [1] and wondering if I
should change anything here.

[1] https://lore.kernel.org/lkml/38900622.ao2n2t5aPS@kreacher/
On Wed, Apr 24, 2019 at 8:48 AM Viresh Kumar <viresh.kumar@linaro.org> wrote:
>
> On 22-03-19, 11:49, Viresh Kumar wrote:
> > On 21-03-19, 12:45, Peter Zijlstra wrote:
> > > On Wed, Mar 20, 2019 at 10:22:23AM +0530, Viresh Kumar wrote:
> > > > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > > > index 65e4559eef2f..1ac8c710cccc 100644
> > > > --- a/arch/x86/kvm/x86.c
> > > > +++ b/arch/x86/kvm/x86.c
> > > > @@ -6649,10 +6649,8 @@ static void kvm_hyperv_tsc_notifier(void)
> > > >  }
> > > >  #endif
> > > >
> > > > -static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
> > > > -				     void *data)
> > > > +static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
> > > >  {
> > > > -	struct cpufreq_freqs *freq = data;
> > > >  	struct kvm *kvm;
> > > >  	struct kvm_vcpu *vcpu;
> > > >  	int i, send_ipi = 0;
> > > > @@ -6696,17 +6694,12 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
> > > >  	 *
> > > >  	 */
> > > >
> > > > -	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
> > > > -		return 0;
> > > > -	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
> > > > -		return 0;
> > > > -
> > > > -	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
> > > > +	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
> > > >
> > > >  	spin_lock(&kvm_lock);
> > > >  	list_for_each_entry(kvm, &vm_list, vm_list) {
> > > >  		kvm_for_each_vcpu(i, vcpu, kvm) {
> > > > -			if (vcpu->cpu != freq->cpu)
> > > > +			if (vcpu->cpu != cpu)
> > > >  				continue;
> > > >  			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
> > > >  			if (vcpu->cpu != smp_processor_id())
> > > > @@ -6728,8 +6721,24 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
> > > >  	 * guest context is entered kvmclock will be updated,
> > > >  	 * so the guest will not see stale values.
> > > >  	 */
> > > > -	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
> > > > +	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
> > > >  	}
> > > > +}
> > > > +
> > > > +static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
> > > > +				     void *data)
> > > > +{
> > > > +	struct cpufreq_freqs *freq = data;
> > > > +	int cpu;
> > > > +
> > > > +	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
> > > > +		return 0;
> > > > +	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
> > > > +		return 0;
> > > > +
> > > > +	for_each_cpu(cpu, freq->policy->cpus)
> > > > +		__kvmclock_cpufreq_notifier(freq, cpu);
> > > > +
> > > > 	return 0;
> > > >  }
> > >
> > > Then why do we pretend otherwise here?
> >
> > My intention was to not add any bug here because of my lack of knowledge
> > of the architecture in question, and so I tried to be safe.
> >
> > If you guys think the behavior should be the same here as for the TSC,
> > then we can add similar checks here.
>
> I am rebasing this patch over Rafael's patch [1] and wondering if I
> should change anything here.

I guess please repost when my patch makes it into linux-next.

> [1] https://lore.kernel.org/lkml/38900622.ao2n2t5aPS@kreacher/
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index facd4240ca02..c6d37563610a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -754,15 +754,20 @@ static int cpufreq_callback(struct notifier_block *nb,
 					unsigned long val, void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	int cpu = freq->cpu;
+	struct cpumask *cpus = freq->policy->cpus;
+	int cpu, first = cpumask_first(cpus);
+	unsigned int lpj;
 
 	if (freq->flags & CPUFREQ_CONST_LOOPS)
 		return NOTIFY_OK;
 
-	if (!per_cpu(l_p_j_ref, cpu)) {
-		per_cpu(l_p_j_ref, cpu) =
-			per_cpu(cpu_data, cpu).loops_per_jiffy;
-		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+	if (!per_cpu(l_p_j_ref, first)) {
+		for_each_cpu(cpu, cpus) {
+			per_cpu(l_p_j_ref, cpu) =
+				per_cpu(cpu_data, cpu).loops_per_jiffy;
+			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+		}
+
 		if (!global_l_p_j_ref) {
 			global_l_p_j_ref = loops_per_jiffy;
 			global_l_p_j_ref_freq = freq->old;
@@ -774,10 +779,11 @@ static int cpufreq_callback(struct notifier_block *nb,
 		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
 						global_l_p_j_ref_freq,
 						freq->new);
-		per_cpu(cpu_data, cpu).loops_per_jiffy =
-			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
-					per_cpu(l_p_j_ref_freq, cpu),
-					freq->new);
+
+		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
+				    per_cpu(l_p_j_ref_freq, first), freq->new);
+		for_each_cpu(cpu, cpus)
+			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
 	}
 	return NOTIFY_OK;
 }
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 3eb77943ce12..89fb05f90609 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -653,19 +653,23 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val
 				    void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	unsigned int cpu = freq->cpu;
-	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
+	unsigned int cpu;
+	struct freq_table *ft;
 
-	if (!ft->ref_freq) {
-		ft->ref_freq = freq->old;
-		ft->clock_tick_ref = cpu_data(cpu).clock_tick;
-	}
-	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
-	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
-		cpu_data(cpu).clock_tick =
-			cpufreq_scale(ft->clock_tick_ref,
-				      ft->ref_freq,
-				      freq->new);
+	for_each_cpu(cpu, freq->policy->cpus) {
+		ft = &per_cpu(sparc64_freq_table, cpu);
+
+		if (!ft->ref_freq) {
+			ft->ref_freq = freq->old;
+			ft->clock_tick_ref = cpu_data(cpu).clock_tick;
+		}
+
+		if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+		    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+			cpu_data(cpu).clock_tick =
+				cpufreq_scale(ft->clock_tick_ref, ft->ref_freq,
+					      freq->new);
+		}
 	}
 
 	return 0;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 3fae23834069..b2fe665878f7 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -958,10 +958,15 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	struct cpufreq_freqs *freq = data;
 	unsigned long *lpj;
 
+	if (WARN_ON_ONCE(cpumask_weight(freq->policy->related_cpus) != 1)) {
+		mark_tsc_unstable("cpufreq changes: related CPUs affected");
+		return 0;
+	}
+
 	lpj = &boot_cpu_data.loops_per_jiffy;
 #ifdef CONFIG_SMP
 	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
+		lpj = &cpu_data(freq->policy->cpu).loops_per_jiffy;
 #endif
 
 	if (!ref_freq) {
@@ -977,7 +982,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
 			mark_tsc_unstable("cpufreq changes");
 
-		set_cyc2ns_scale(tsc_khz, freq->cpu, rdtsc());
+		set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
 	}
 
 	return 0;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 65e4559eef2f..1ac8c710cccc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6649,10 +6649,8 @@ static void kvm_hyperv_tsc_notifier(void)
 }
 #endif
 
-static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-				     void *data)
+static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
 {
-	struct cpufreq_freqs *freq = data;
 	struct kvm *kvm;
 	struct kvm_vcpu *vcpu;
 	int i, send_ipi = 0;
@@ -6696,17 +6694,12 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 	 *
 	 */
 
-	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
-		return 0;
-	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
-		return 0;
-
-	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
+	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
 
 	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
-			if (vcpu->cpu != freq->cpu)
+			if (vcpu->cpu != cpu)
 				continue;
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 			if (vcpu->cpu != smp_processor_id())
@@ -6728,8 +6721,24 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 	 * guest context is entered kvmclock will be updated,
 	 * so the guest will not see stale values.
 	 */
-	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
+	smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
 	}
+}
+
+static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+				     void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	int cpu;
+
+	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
+		return 0;
+	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
+		return 0;
+
+	for_each_cpu(cpu, freq->policy->cpus)
+		__kvmclock_cpufreq_notifier(freq, cpu);
+
 	return 0;
 }
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e10922709d13..fba38bf27d26 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -300,11 +300,14 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
 				      struct cpufreq_freqs *freqs,
 				      unsigned int state)
 {
+	int cpu;
+
 	BUG_ON(irqs_disabled());
 
 	if (cpufreq_disabled())
 		return;
 
+	freqs->policy = policy;
 	freqs->flags = cpufreq_driver->flags;
 	pr_debug("notification %u of frequency transition to %u kHz\n",
 		 state, freqs->new);
@@ -324,10 +327,8 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
 			}
 		}
 
-		for_each_cpu(freqs->cpu, policy->cpus) {
-			srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
-						 CPUFREQ_PRECHANGE, freqs);
-		}
+		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+					 CPUFREQ_PRECHANGE, freqs);
 
 		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
 		break;
@@ -337,11 +338,11 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
 		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
 			 cpumask_pr_args(policy->cpus));
 
-		for_each_cpu(freqs->cpu, policy->cpus) {
-			trace_cpu_frequency(freqs->new, freqs->cpu);
-			srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
-						 CPUFREQ_POSTCHANGE, freqs);
-		}
+		for_each_cpu(cpu, policy->cpus)
+			trace_cpu_frequency(freqs->new, cpu);
+
+		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+					 CPUFREQ_POSTCHANGE, freqs);
 
 		cpufreq_stats_record_transition(policy, freqs->new);
 		policy->cur = freqs->new;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index b160e98076e3..e327523ddff2 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -42,13 +42,6 @@ enum cpufreq_table_sorting {
 	CPUFREQ_TABLE_SORTED_DESCENDING
 };
 
-struct cpufreq_freqs {
-	unsigned int cpu;	/* cpu nr */
-	unsigned int old;
-	unsigned int new;
-	u8 flags;		/* flags of cpufreq_driver, see below. */
-};
-
 struct cpufreq_cpuinfo {
 	unsigned int		max_freq;
 	unsigned int		min_freq;
@@ -156,6 +149,13 @@ struct cpufreq_policy {
 	struct thermal_cooling_device *cdev;
 };
 
+struct cpufreq_freqs {
+	struct cpufreq_policy *policy;
+	unsigned int old;
+	unsigned int new;
+	u8 flags;		/* flags of cpufreq_driver, see below. */
+};
+
 /* Only for ACPI */
 #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
 #define CPUFREQ_SHARED_TYPE_HW	 (1) /* HW does needed coordination */
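For illustration only (not part of the patch), a minimal sketch of what a transition notifier looks like once struct cpufreq_freqs carries the policy pointer instead of a per-CPU "cpu" field: the notifier is invoked once per policy and walks policy->cpus itself. The function name is hypothetical; only the struct layout and cpufreq constants come from the patch above:

	#include <linux/cpufreq.h>

	/* Hypothetical example notifier for the reworked cpufreq_freqs. */
	static int example_transition_notifier(struct notifier_block *nb,
					       unsigned long val, void *data)
	{
		struct cpufreq_freqs *freq = data;
		int cpu;

		if (val != CPUFREQ_POSTCHANGE)
			return 0;

		/* One callback per policy: iterate all CPUs sharing the frequency. */
		for_each_cpu(cpu, freq->policy->cpus)
			pr_debug("cpu%d: %u kHz -> %u kHz\n",
				 cpu, freq->old, freq->new);

		return 0;
	}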