@@ -112,7 +112,9 @@ void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
void wbinvd_on_all_cpus(void);
+void wbinvd_on_many_cpus(struct cpumask *cpus);
void wbnoinvd_on_all_cpus(void);
+void wbnoinvd_on_many_cpus(struct cpumask *cpus);
void smp_kick_mwait_play_dead(void);
@@ -160,11 +162,21 @@ static inline void wbinvd_on_all_cpus(void)
wbinvd();
}
+static inline void wbinvd_on_many_cpus(struct cpumask *cpus)
+{
+ wbinvd();
+}
+
static inline void wbnoinvd_on_all_cpus(void)
{
wbnoinvd();
}
+static inline void wbnoinvd_on_many_cpus(struct cpumask *cpus)
+{
+ wbnoinvd();
+}
+
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
return (struct cpumask *)cpumask_of(0);
@@ -4957,11 +4957,6 @@ long kvm_arch_dev_ioctl(struct file *filp,
return r;
}
-static void wbinvd_ipi(void *garbage)
-{
- wbinvd();
-}
-
static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
return kvm_arch_has_noncoherent_dma(vcpu->kvm);
@@ -8236,8 +8231,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
int cpu = get_cpu();
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
- on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask,
- wbinvd_ipi, NULL, 1);
+ wbinvd_on_many_cpus(vcpu->arch.wbinvd_dirty_mask);
put_cpu();
cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
} else
@@ -20,6 +20,12 @@ void wbinvd_on_all_cpus(void)
}
EXPORT_SYMBOL(wbinvd_on_all_cpus);
+void wbinvd_on_many_cpus(struct cpumask *cpus)
+{
+ on_each_cpu_mask(cpus, __wbinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbinvd_on_many_cpus);
+
static void __wbnoinvd(void *dummy)
{
wbnoinvd();
@@ -30,3 +36,9 @@ void wbnoinvd_on_all_cpus(void)
on_each_cpu(__wbnoinvd, NULL, 1);
}
EXPORT_SYMBOL(wbnoinvd_on_all_cpus);
+
+void wbnoinvd_on_many_cpus(struct cpumask *cpus)
+{
+ on_each_cpu_mask(cpus, __wbnoinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbnoinvd_on_many_cpus);
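
For reference, a minimal sketch of how a caller might use the new helper; the function name and the 'dirty_cpus' mask below are hypothetical and are not part of this patch, which only converts the existing KVM WBINVD-emulation path:

/*
 * Hypothetical illustration only (not in this patch): flush caches on
 * the CPUs recorded in a caller-maintained cpumask instead of
 * broadcasting WBINVD to every online CPU.
 */
static void example_flush_dirty_cpus(struct cpumask *dirty_cpus)
{
	if (cpumask_empty(dirty_cpus))
		return;

	wbinvd_on_many_cpus(dirty_cpus);
	cpumask_clear(dirty_cpus);
}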