@@ -168,6 +168,9 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
static int init_rmode(struct kvm *kvm);
static u64 construct_eptp(unsigned long root_hpa);
+static void kvm_cpu_vmxon(u64 addr);
+static void kvm_cpu_vmxoff(void);
+
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -787,8 +790,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 tsc_this, delta, new_offset;
+ u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
if (vmm_coexistence) {
+ kvm_cpu_vmxon(phys_addr);
vmcs_load(vmx->vmcs);
set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
} else {
@@ -846,6 +851,7 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
vmcs_clear(vmx->vmcs);
rdtscll(vmx->vcpu.arch.host_tsc);
vmx->launched = 0;
+ kvm_cpu_vmxoff();
}
}
@@ -1269,9 +1275,11 @@ static int hardware_enable(void *garbage)
FEATURE_CONTROL_LOCKED |
FEATURE_CONTROL_VMXON_ENABLED);
write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
- kvm_cpu_vmxon(phys_addr);
- ept_sync_global();
+ if (!vmm_coexistence) {
+ kvm_cpu_vmxon(phys_addr);
+ ept_sync_global();
+ }
return 0;
}
@@ -1297,9 +1305,10 @@ static void kvm_cpu_vmxoff(void)
static void hardware_disable(void *garbage)
{
- if (!vmm_coexistence)
+ if (!vmm_coexistence) {
vmclear_local_vcpus();
- kvm_cpu_vmxoff();
+ kvm_cpu_vmxoff();
+ }
write_cr4(read_cr4() & ~X86_CR4_VMXE);
}
@@ -3967,6 +3976,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
int err;
struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ u64 phys_addr;
int cpu;
if (!vmx)
@@ -3990,9 +4000,12 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (vmm_coexistence) {
preempt_disable();
+ phys_addr = __pa(per_cpu(vmxarea, smp_processor_id()));
+ kvm_cpu_vmxon(phys_addr);
vmcs_load(vmx->vmcs);
err = vmx_vcpu_setup(vmx);
vmcs_clear(vmx->vmcs);
+ kvm_cpu_vmxoff();
preempt_enable();
} else {
vmcs_clear(vmx->vmcs);
@@ -4138,12 +4150,17 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u64 phys_addr;
u32 exec_control;
vmx->rdtscp_enabled = false;
if (vmx_rdtscp_supported()) {
- if (vmm_coexistence)
+ if (vmm_coexistence) {
+ preempt_disable();
+ phys_addr = __pa(per_cpu(vmxarea, smp_processor_id()));
+ kvm_cpu_vmxon(phys_addr);
vmcs_load(vmx->vmcs);
+ }
exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
if (exec_control & SECONDARY_EXEC_RDTSCP) {
best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
@@ -4155,8 +4172,11 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
exec_control);
}
}
- if (vmm_coexistence)
+ if (vmm_coexistence) {
vmcs_clear(vmx->vmcs);
+ kvm_cpu_vmxoff();
+ preempt_enable();
+ }
}
}