[24/28] nVMX: Handling of CR0 and CR4 modifying instructions

Message ID: 201012081712.oB8HCFak008814@rice.haifa.ibm.com

Commit Message

Nadav Har'El Dec. 8, 2010, 5:12 p.m. UTC

Patch

--- .before/arch/x86/kvm/vmx.c	2010-12-08 18:56:51.000000000 +0200
+++ .after/arch/x86/kvm/vmx.c	2010-12-08 18:56:51.000000000 +0200
@@ -3877,6 +3877,53 @@  static void complete_insn_gp(struct kvm_
 		skip_emulated_instruction(vcpu);
 }
 
+/* Called to set CR0 as appropriate for a mov-to-cr0 exit. */
+static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	if (is_guest_mode(vcpu)) {
+		/*
+		 * We get here when L2 changed cr0 in a way that did not change
+		 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
+		 * but did change L0 shadowed bits. This can currently happen
+		 * with the TS bit: L0 may want to leave TS on (for lazy fpu
+		 * loading) while pretending to allow the guest to change it.
+		 */
+		vmcs_writel(GUEST_CR0,
+		   (val & vcpu->arch.cr0_guest_owned_bits) |
+		   (vmcs_readl(GUEST_CR0) & ~vcpu->arch.cr0_guest_owned_bits));
+		vmcs_writel(CR0_READ_SHADOW, val);
+		vcpu->arch.cr0 = val;
+		return 0;
+	} else
+		return kvm_set_cr0(vcpu, val);
+}
+
+static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	if (is_guest_mode(vcpu)) {
+		vmcs_writel(GUEST_CR4,
+		  (val & vcpu->arch.cr4_guest_owned_bits) |
+		  (vmcs_readl(GUEST_CR4) & ~vcpu->arch.cr4_guest_owned_bits));
+		vmcs_writel(CR4_READ_SHADOW, val);
+		vcpu->arch.cr4 = val;
+		return 0;
+	} else
+		return kvm_set_cr4(vcpu, val);
+}
+
+/* Called to set CR0 as appropriate for a CLTS instruction exit. */
+static void handle_clts(struct kvm_vcpu *vcpu)
+{
+	if (is_guest_mode(vcpu)) {
+		/* As in handle_set_cr0(), we can't call vmx_set_cr0 here */
+		vmcs_writel(GUEST_CR0, vmcs_readl(GUEST_CR0) & ~X86_CR0_TS);
+		vmcs_writel(CR0_READ_SHADOW,
+			vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
+		vcpu->arch.cr0 &= ~X86_CR0_TS;
+	} else
+		vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+}
+
 static int handle_cr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification, val;
@@ -3893,7 +3941,7 @@  static int handle_cr(struct kvm_vcpu *vc
 		trace_kvm_cr_write(cr, val);
 		switch (cr) {
 		case 0:
-			err = kvm_set_cr0(vcpu, val);
+			err = handle_set_cr0(vcpu, val);
 			complete_insn_gp(vcpu, err);
 			return 1;
 		case 3:
@@ -3901,7 +3949,7 @@  static int handle_cr(struct kvm_vcpu *vc
 			complete_insn_gp(vcpu, err);
 			return 1;
 		case 4:
-			err = kvm_set_cr4(vcpu, val);
+			err = handle_set_cr4(vcpu, val);
 			complete_insn_gp(vcpu, err);
 			return 1;
 		case 8: {
@@ -3919,7 +3967,7 @@  static int handle_cr(struct kvm_vcpu *vc
 		};
 		break;
 	case 2: /* clts */
-		vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+		handle_clts(vcpu);
 		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
 		skip_emulated_instruction(vcpu);
 		vmx_fpu_activate(vcpu);
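
The guest-owned/host-owned bit merge that handle_set_cr0() performs can be
illustrated outside the kernel tree. Below is a minimal stand-alone sketch;
merge_cr0() and the sample constants are hypothetical, invented for this
example, and only approximate real CR0 contents:

#include <stdio.h>

#define X86_CR0_TS (1UL << 3)	/* CR0.TS: Task Switched */

/*
 * Model of the merge in handle_set_cr0(): bits the guest owns take the
 * value the guest wrote; bits the host (L0) owns keep their current
 * hardware value.
 */
static unsigned long merge_cr0(unsigned long guest_val,
			       unsigned long hw_cr0,
			       unsigned long guest_owned_bits)
{
	return (guest_val & guest_owned_bits) |
	       (hw_cr0 & ~guest_owned_bits);
}

int main(void)
{
	unsigned long hw_cr0 = 0x80050033UL | X86_CR0_TS;	/* L0 keeps TS set */
	unsigned long guest_owned = ~(unsigned long)X86_CR0_TS;	/* L0 owns only TS */
	unsigned long guest_write = 0x80050033UL;	/* guest tries to clear TS */

	/* Hardware CR0 keeps TS; the read shadow returns what the guest wrote. */
	printf("GUEST_CR0       = %#lx\n",
	       merge_cr0(guest_write, hw_cr0, guest_owned));
	printf("CR0_READ_SHADOW = %#lx\n", guest_write);
	return 0;
}

Because CR0_READ_SHADOW is written with the guest's raw value, a later guest
read of CR0 observes exactly what it wrote, even though the hardware register
still has TS set. handle_clts() follows the same pattern, updating GUEST_CR0,
CR0_READ_SHADOW and vcpu->arch.cr0 directly rather than going through
vmx_set_cr0().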