From patchwork Wed Dec 8 17:12:15 2010
From: "Nadav Har'El"
X-Patchwork-Submitter: Nadav Har'El
X-Patchwork-Id: 391272
Date: Wed, 8 Dec 2010 19:12:15 +0200
Message-Id: <201012081712.oB8HCFak008814@rice.haifa.ibm.com>
To: kvm@vger.kernel.org
Cc: gleb@redhat.com, avi@redhat.com
References: <1291827596-nyh@il.ibm.com>
Subject: [PATCH 24/28] nVMX: Handling of CR0 and CR4 modifying instructions

--- .before/arch/x86/kvm/vmx.c	2010-12-08 18:56:51.000000000 +0200
+++ .after/arch/x86/kvm/vmx.c	2010-12-08 18:56:51.000000000 +0200
@@ -3877,6 +3877,54 @@ static void complete_insn_gp(struct kvm_
 	skip_emulated_instruction(vcpu);
 }
 
+/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
+static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	if (is_guest_mode(vcpu)) {
+		/*
+		 * We get here when L2 changed cr0 in a way that did not change
+		 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
+		 * but did change L0 shadowed bits. This can currently happen
+		 * with the TS bit: L0 may want to leave TS on (for lazy fpu
+		 * loading) while pretending to allow the guest to change it.
+		 */
+		vmcs_writel(GUEST_CR0,
+			(val & vcpu->arch.cr0_guest_owned_bits) |
+			(vmcs_readl(GUEST_CR0) & ~vcpu->arch.cr0_guest_owned_bits));
+		vmcs_writel(CR0_READ_SHADOW, val);
+		vcpu->arch.cr0 = val;
+		return 0;
+	} else
+		return kvm_set_cr0(vcpu, val);
+}
+
+static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	if (is_guest_mode(vcpu)) {
+		vmcs_writel(GUEST_CR4,
+			(val & vcpu->arch.cr4_guest_owned_bits) |
+			(vmcs_readl(GUEST_CR4) & ~vcpu->arch.cr4_guest_owned_bits));
+		vmcs_writel(CR4_READ_SHADOW, val);
+		vcpu->arch.cr4 = val;
+		return 0;
+	} else
+		return kvm_set_cr4(vcpu, val);
+}
+
+
+/* called to set cr0 as appropriate for a clts instruction exit. */
+static void handle_clts(struct kvm_vcpu *vcpu)
+{
+	if (is_guest_mode(vcpu)) {
+		/* As in handle_set_cr0(), we can't call vmx_set_cr0 here */
+		vmcs_writel(GUEST_CR0, vmcs_readl(GUEST_CR0) & ~X86_CR0_TS);
+		vmcs_writel(CR0_READ_SHADOW,
+			vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
+		vcpu->arch.cr0 &= ~X86_CR0_TS;
+	} else
+		vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+}
+
 static int handle_cr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification, val;
@@ -3893,7 +3941,7 @@ static int handle_cr(struct kvm_vc
 		trace_kvm_cr_write(cr, val);
 		switch (cr) {
 		case 0:
-			err = kvm_set_cr0(vcpu, val);
+			err = handle_set_cr0(vcpu, val);
 			complete_insn_gp(vcpu, err);
 			return 1;
 		case 3:
@@ -3901,7 +3949,7 @@ static int handle_cr(struct kvm_vc
 			err = kvm_set_cr3(vcpu, val);
 			complete_insn_gp(vcpu, err);
 			return 1;
 		case 4:
-			err = kvm_set_cr4(vcpu, val);
+			err = handle_set_cr4(vcpu, val);
 			complete_insn_gp(vcpu, err);
 			return 1;
 		case 8: {
@@ -3919,7 +3967,7 @@ static int handle_cr(struct kvm_vc
 		};
 		break;
 	case 2: /* clts */
-		vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+		handle_clts(vcpu);
 		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
 		skip_emulated_instruction(vcpu);
 		vmx_fpu_activate(vcpu);
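
For reference, the guest-owned-bits merge done above can be read in
isolation: bits the guest owns are taken from the value it wrote, all
other bits keep their current hardware value, while the read shadow
always takes the guest's value, so the guest reads back exactly what it
wrote. The following is a minimal user-space sketch of just that
arithmetic; merge_cr() and the sample constants are illustrative only
and are not part of the patch:

#include <stdio.h>

#define X86_CR0_TS (1UL << 3)	/* bit 3, as in the arch/x86 headers */

/*
 * Guest-owned bits come from the value the guest wrote; everything
 * else keeps its current hardware value. This mirrors the expression
 * handle_set_cr0()/handle_set_cr4() write into GUEST_CR0/GUEST_CR4.
 */
static unsigned long merge_cr(unsigned long hw, unsigned long val,
			      unsigned long guest_owned)
{
	return (val & guest_owned) | (hw & ~guest_owned);
}

int main(void)
{
	/* L0 keeps TS for lazy FPU loading, so TS is host-owned here */
	unsigned long guest_owned = ~X86_CR0_TS;
	unsigned long hw_cr0      = 0x80050033UL | X86_CR0_TS;
	unsigned long guest_val   = 0x80050033UL;	/* guest clears TS */

	/* the read shadow takes the guest's value verbatim ... */
	unsigned long read_shadow = guest_val;
	/* ... but hardware keeps the host-owned TS bit set */
	unsigned long new_hw = merge_cr(hw_cr0, guest_val, guest_owned);

	printf("read shadow %#lx: TS %s\n", read_shadow,
	       (read_shadow & X86_CR0_TS) ? "set" : "clear");
	printf("hardware    %#lx: TS %s\n", new_hw,
	       (new_hw & X86_CR0_TS) ? "set" : "clear");
	return 0;
}

Run on the lazy-FPU case from the comment above (TS host-owned, guest
clearing it), the shadow reports TS clear while the hardware value
keeps TS set.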