
[4/24] Allow setting the VMXE bit in CR4

Message ID: 20100615144426.GC9826@fermat.math.technion.ac.il
State: New, archived

Commit Message

Nadav Har'El June 15, 2010, 2:44 p.m. UTC
A guest cannot enter VMX operation unless it is first allowed to set the VMXE bit in CR4. Whether that is permitted is now a per-vendor decision: kvm_x86_ops->set_cr4() returns an error code instead of void, and __kvm_set_cr4() fails the write when the callback refuses the new value. svm_set_cr4() rejects VMXE unconditionally. vmx_set_cr4() allows the guest to set VMXE only when the nested module parameter is enabled, and refuses to clear VMXE again while the vCPU is still in VMX operation (nested.vmxon).
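
The new int return value plugs into the existing error path around __kvm_set_cr4(): when the vendor callback refuses the value, the write fails and the guest takes a #GP(0). A minimal sketch of that caller side, assuming the kvm_set_cr4() wrapper and the kvm_inject_gp() helper already present in x86.c (neither is touched by this patch):

/*
 * Sketch only, not part of this patch: how a rejected CR4 value is
 * surfaced to the guest.  __kvm_set_cr4() (see the x86.c hunk below)
 * now returns 1 when kvm_x86_ops->set_cr4() refuses the write, e.g.
 * an attempt to set VMXE on svm or with nesting disabled.
 */
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (__kvm_set_cr4(vcpu, cr4))
		kvm_inject_gp(vcpu, 0);	/* fail the MOV to CR4 with #GP(0) */
}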

Patch

--- .before/arch/x86/include/asm/kvm_host.h	2010-06-15 17:20:01.000000000 +0300
+++ .after/arch/x86/include/asm/kvm_host.h	2010-06-15 17:20:01.000000000 +0300
@@ -490,7 +490,7 @@  struct kvm_x86_ops {
 	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
-	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
 	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
 	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
--- .before/arch/x86/kvm/svm.c	2010-06-15 17:20:01.000000000 +0300
+++ .after/arch/x86/kvm/svm.c	2010-06-15 17:20:01.000000000 +0300
@@ -1242,11 +1242,14 @@  static void svm_set_cr0(struct kvm_vcpu 
 	update_cr0_intercept(svm);
 }
 
-static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
 	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
 
+	if (cr4 & X86_CR4_VMXE)
+		return 1;
+
 	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
 		force_new_asid(vcpu);
 
@@ -1255,6 +1258,7 @@  static void svm_set_cr4(struct kvm_vcpu 
 		cr4 |= X86_CR4_PAE;
 	cr4 |= host_cr4_mce;
 	to_svm(vcpu)->vmcb->save.cr4 = cr4;
+	return 0;
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
--- .before/arch/x86/kvm/x86.c	2010-06-15 17:20:01.000000000 +0300
+++ .after/arch/x86/kvm/x86.c	2010-06-15 17:20:01.000000000 +0300
@@ -490,11 +490,9 @@  int __kvm_set_cr4(struct kvm_vcpu *vcpu,
 		   && !load_pdptrs(vcpu, vcpu->arch.cr3))
 		return 1;
 
-	if (cr4 & X86_CR4_VMXE)
+	if (kvm_x86_ops->set_cr4(vcpu, cr4))
 		return 1;
 
-	kvm_x86_ops->set_cr4(vcpu, cr4);
-
 	if ((cr4 ^ old_cr4) & pdptr_bits)
 		kvm_mmu_reset_context(vcpu);
 
--- .before/arch/x86/kvm/vmx.c	2010-06-15 17:20:01.000000000 +0300
+++ .after/arch/x86/kvm/vmx.c	2010-06-15 17:20:01.000000000 +0300
@@ -1874,7 +1874,7 @@  static void ept_save_pdptrs(struct kvm_v
 		  (unsigned long *)&vcpu->arch.regs_dirty);
 }
 
-static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 
 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 					unsigned long cr0,
@@ -1969,11 +1969,19 @@  static void vmx_set_cr3(struct kvm_vcpu 
 	vmcs_writel(GUEST_CR3, guest_cr3);
 }
 
-static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
 		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
 
+	if (cr4 & X86_CR4_VMXE) {
+		if (!nested)
+			return 1;
+	} else {
+		if (nested && to_vmx(vcpu)->nested.vmxon)
+			return 1;
+	}
+
 	vcpu->arch.cr4 = cr4;
 	if (enable_ept) {
 		if (!is_paging(vcpu)) {
@@ -1986,6 +1994,7 @@  static void vmx_set_cr4(struct kvm_vcpu 
 
 	vmcs_writel(CR4_READ_SHADOW, cr4);
 	vmcs_writel(GUEST_CR4, hw_cr4);
+	return 0;
 }
 
 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
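
For illustration only, not part of the patch: from the L1 guest's point of view, CR4.VMXE must be set before VMXON can be executed, and with this change that write succeeds when running on vmx with the nested parameter enabled, and faults with #GP on svm or when nesting is disabled. A hypothetical guest-side sketch; the guest_read_cr4()/guest_write_cr4() helpers are invented for the example, only the bit position of VMXE is architectural:

#define X86_CR4_VMXE	(1UL << 13)	/* CR4 bit 13: enable VMX operation */

static inline unsigned long guest_read_cr4(void)
{
	unsigned long cr4;

	asm volatile("mov %%cr4, %0" : "=r" (cr4));
	return cr4;
}

static inline void guest_write_cr4(unsigned long cr4)
{
	/* The write typically traps to the L0 hypervisor, which handles it
	 * via kvm_x86_ops->set_cr4() as patched above. */
	asm volatile("mov %0, %%cr4" : : "r" (cr4) : "memory");
}

static void guest_enable_vmx_operation(void)
{
	/* Must be done before VMXON; clearing VMXE again is refused while
	 * the vCPU remains in VMX operation (nested.vmxon). */
	guest_write_cr4(guest_read_cr4() | X86_CR4_VMXE);
}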