@@ -161,8 +161,11 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
static int init_rmode(struct kvm *kvm);
static u64 construct_eptp(unsigned long root_hpa);
+static void kvm_cpu_vmxon(u64 addr);
+static void kvm_cpu_vmxoff(void);
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
+static DEFINE_PER_CPU(bool, vmx_mode);
static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
@@ -755,7 +758,9 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 tsc_this, delta, new_offset;
+ u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
+ kvm_cpu_vmxon(phys_addr);
vmcs_load(vmx->vmcs);
if (vcpu->cpu != cpu) {
@@ -796,6 +801,7 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
vmcs_clear(vmx->vmcs);
rdtscll(vmx->vcpu.arch.host_tsc);
vmx->launched = 0;
+ kvm_cpu_vmxoff();
}
static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
@@ -1187,9 +1193,11 @@ static __init int vmx_disabled_by_bios(void)
/*
 * Enter VMX root operation on the current CPU via VMXON and record that
 * fact in the per-cpu vmx_mode flag, so hardware_disable() can later
 * tell whether a VMXOFF is still required on this CPU.
 *
 * @addr: physical address of this CPU's VMXON region (per-cpu vmxarea).
 *
 * NOTE(review): uses raw_smp_processor_id(), so this assumes callers run
 * with preemption disabled; otherwise the flag could be recorded on a
 * different CPU than the one that executed VMXON -- confirm at call sites.
 * NOTE(review): the VMXOFF in kvm_cpu_vmxoff() is wrapped in __ex()
 * (fault fixup) while this VMXON is not -- intentional? confirm.
 */
static void kvm_cpu_vmxon(u64 addr)
{
	int cpu = raw_smp_processor_id();

	/* VMXON takes its m64 operand via the address in RAX. */
	asm volatile (ASM_VMX_VMXON_RAX
			: : "a"(&addr), "m"(addr)
			: "memory", "cc");
	per_cpu(vmx_mode, cpu) = 1;
}
static int hardware_enable(void *garbage)
@@ -1223,12 +1231,16 @@ static int hardware_enable(void *garbage)
*/
/*
 * Leave VMX root operation on the current CPU via VMXOFF and clear the
 * per-cpu vmx_mode flag so hardware_disable() knows no further VMXOFF
 * is needed on this CPU.
 *
 * NOTE(review): assumes preemption is disabled around the call
 * (raw_smp_processor_id()) so the flag is cleared on the CPU that
 * actually executed VMXOFF -- confirm at call sites.
 */
static void kvm_cpu_vmxoff(void)
{
	int cpu = raw_smp_processor_id();

	/* __ex() provides exception fixup if VMXOFF faults. */
	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
	per_cpu(vmx_mode, cpu) = 0;
}
static void hardware_disable(void *garbage)
{
- kvm_cpu_vmxoff();
+ int cpu = raw_smp_processor_id();
+ if (per_cpu(vmx_mode, cpu))
+ kvm_cpu_vmxoff();
write_cr4(read_cr4() & ~X86_CR4_VMXE);
}
@@ -3930,6 +3942,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
int err;
struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ u64 phys_addr = __pa(per_cpu(vmxarea, smp_processor_id()));
if (!vmx)
return ERR_PTR(-ENOMEM);
@@ -3951,9 +3964,11 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto free_msrs;
preempt_disable();
+ kvm_cpu_vmxon(phys_addr);
vmcs_load(vmx->vmcs);
err = vmx_vcpu_setup(vmx);
vmcs_clear(vmx->vmcs);
+ kvm_cpu_vmxoff();
preempt_enable();
if (err)
@@ -4091,10 +4106,13 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int cpu = raw_smp_processor_id();
+ u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
u32 exec_control;
vmx->rdtscp_enabled = false;
if (vmx_rdtscp_supported()) {
+ kvm_cpu_vmxon(phys_addr);
vmcs_load(vmx->vmcs);
exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
if (exec_control & SECONDARY_EXEC_RDTSCP) {
@@ -4108,6 +4126,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
}
}
vmcs_clear(vmx->vmcs);
+ kvm_cpu_vmxoff();
}
}