@@ -457,6 +457,7 @@ static int kvm_cpu_online(unsigned int cpu)
#ifdef CONFIG_SMP
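+/* APIC ops saved by kvm_setup_pv_ipi(); fallback when __pv_cpu_mask is missing. */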
+static struct apic orig_apic;
static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
static bool pv_tlb_flush_supported(void)
@@ -543,6 +544,11 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
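+	/* The mask may be unallocated if zalloc failed at boot; use the saved ops. */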
+ if (unlikely(!this_cpu_cpumask_var_ptr(__pv_cpu_mask))) {
+ orig_apic.send_IPI_mask(mask, vector);
+ return;
+ }
+
__send_ipi_mask(mask, vector);
}
@@ -552,6 +558,11 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
	unsigned int this_cpu = smp_processor_id();
	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
const struct cpumask *local_mask;
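+	/* No scratch mask to build the "all but self" set in; use the saved ops. */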
+ if (unlikely(!new_mask)) {
+ orig_apic.send_IPI_mask_allbutself(mask, vector);
+ return;
+ }
+
cpumask_copy(new_mask, mask);
cpumask_clear_cpu(this_cpu, new_mask);
local_mask = new_mask;
@@ -612,6 +623,7 @@ late_initcall(setup_efi_kvm_sev_migration);
*/
static void kvm_setup_pv_ipi(void)
{
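+	/* Snapshot the current APIC ops so the PV hooks above can fall back to them. */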
+ orig_apic = *apic;
apic->send_IPI_mask = kvm_send_ipi_mask;
apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
pr_info("setup PV IPIs\n");
@@ -640,6 +652,11 @@ static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
struct kvm_steal_time *src;
struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
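+	/* No scratch mask to filter preempted vCPUs with; do a native flush. */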
+ if (unlikely(!flushmask)) {
+ native_flush_tlb_multi(cpumask, info);
+ return;
+ }
+
cpumask_copy(flushmask, cpumask);
/*
* We have to call flush only on online vCPUs. And
@@ -672,11 +689,16 @@ static __init int kvm_alloc_cpumask(void)
if (pv_tlb_flush_supported() || pv_ipi_supported())
for_each_possible_cpu(cpu) {
- zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
- GFP_KERNEL, cpu_to_node(cpu));
+ if (!zalloc_cpumask_var_node(&per_cpu(__pv_cpu_mask, cpu),
+ GFP_KERNEL, cpu_to_node(cpu)))
+ goto err_out;
}
return 0;
+err_out:
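+	/* free_cpumask_var() is a no-op for masks that were never allocated. */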
+ for_each_possible_cpu(cpu)
+ free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
+ return -ENOMEM;
}
arch_initcall(kvm_alloc_cpumask);
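
For reference, here is a minimal user-space sketch of the snapshot-and-fallback pattern the patch relies on: save the original ops table before overriding an entry, then call through the saved copy whenever the fast-path resource is unavailable. All names below (struct ops, native_send, pv_send, fast_ready) are illustrative stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

struct ops {
	void (*send)(int vector);
};

static void native_send(int vector)
{
	printf("native send, vector %d\n", vector);
}

static struct ops ops = { .send = native_send };
static struct ops orig_ops;	/* snapshot, playing the role of orig_apic */
static bool fast_ready;		/* stands in for the __pv_cpu_mask NULL check */

static void pv_send(int vector)
{
	if (!fast_ready) {	/* resource missing: take the saved fallback */
		orig_ops.send(vector);
		return;
	}
	printf("pv send, vector %d\n", vector);
}

int main(void)
{
	orig_ops = ops;		/* snapshot before overriding, as in kvm_setup_pv_ipi() */
	ops.send = pv_send;
	ops.send(1);		/* fast path not ready: falls back to native_send */
	fast_ready = true;
	ops.send(2);		/* takes the pv path */
	return 0;
}

Copying the whole ops struct rather than just the overridden pointers mirrors `orig_apic = *apic;` and keeps the fallback valid even if more entries are hooked later.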