From patchwork Thu Jan 7 12:15:44 2010
X-Patchwork-Submitter: Avi Kivity
X-Patchwork-Id: 71577
From: Avi Kivity
To: Marcelo Tosatti, Joerg Roedel
Cc: kvm@vger.kernel.org
Subject: [PATCH 3/3] KVM: SVM: Lazy fpu for npt
Date: Thu, 7 Jan 2010 14:15:44 +0200
Message-Id: <1262866544-4985-4-git-send-email-avi@redhat.com>
In-Reply-To: <1262866544-4985-1-git-send-email-avi@redhat.com>
References: <1262866544-4985-1-git-send-email-avi@redhat.com>
X-Mailing-List: kvm@vger.kernel.org

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f4418e2..7f3d890 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -571,6 +571,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	control->intercept = 	(1ULL << INTERCEPT_INTR) |
 				(1ULL << INTERCEPT_NMI) |
 				(1ULL << INTERCEPT_SMI) |
+				(1ULL << INTERCEPT_SELECTIVE_CR0) |
 				(1ULL << INTERCEPT_CPUID) |
 				(1ULL << INTERCEPT_INVD) |
 				(1ULL << INTERCEPT_HLT) |
@@ -643,10 +644,8 @@ static void init_vmcb(struct vcpu_svm *svm)
 		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
 					(1ULL << INTERCEPT_INVLPG));
 		control->intercept_exceptions &= ~(1 << PF_VECTOR);
-		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
-						INTERCEPT_CR3_MASK);
-		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
-						 INTERCEPT_CR3_MASK);
+		control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
+		control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
 		save->g_pat = 0x0007040600070406ULL;
 		save->cr3 = 0;
 		save->cr4 = 0;
@@ -965,6 +964,27 @@ static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
+static void update_cr0_intercept(struct vcpu_svm *svm)
+{
+	ulong gcr0 = svm->vcpu.arch.cr0;
+	u64 *hcr0 = &svm->vmcb->save.cr0;
+
+	if (!svm->vcpu.fpu_active)
+		*hcr0 |= SVM_CR0_SELECTIVE_MASK;
+	else
+		*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
+			| (gcr0 & SVM_CR0_SELECTIVE_MASK);
+
+
+	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
+		svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
+		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+	} else {
+		svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
+		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+	}
+}
+
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -982,12 +1002,11 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		}
 	}
 #endif
-	if (npt_enabled)
-		goto set;
-
 	vcpu->arch.cr0 = cr0;
-	cr0 |= X86_CR0_PG | X86_CR0_WP;
-set:
+
+	if (!npt_enabled)
+		cr0 |= X86_CR0_PG | X86_CR0_WP;
+
 	/*
 	 * re-enable caching here because the QEMU bios
 	 * does not do it - this results in some delay at
@@ -995,6 +1014,7 @@ set:
 	 */
 	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
+	update_cr0_intercept(svm);
 }
 
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -1240,11 +1260,8 @@ static int ud_interception(struct vcpu_svm *svm)
 static int nm_interception(struct vcpu_svm *svm)
 {
 	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!kvm_read_cr0_bits(&svm->vcpu, X86_CR0_TS))
-		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
-	else
-		svm->vmcb->save.cr0 |= X86_CR0_TS;
 	svm->vcpu.fpu_active = 1;
+	update_cr0_intercept(svm);
 
 	return 1;
 }
@@ -2297,7 +2314,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
 	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
 	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
-	/* for now: */
+	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
@@ -2383,21 +2400,10 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 
 	svm_complete_interrupts(svm);
 
-	if (npt_enabled) {
-		int mmu_reload = 0;
-		if ((kvm_read_cr0_bits(vcpu, X86_CR0_PG) ^ svm->vmcb->save.cr0)
-		    & X86_CR0_PG) {
-			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
-			mmu_reload = 1;
-		}
+	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
+	if (npt_enabled)
 		vcpu->arch.cr3 = svm->vmcb->save.cr3;
-		if (mmu_reload) {
-			kvm_mmu_reset_context(vcpu);
-			kvm_mmu_load(vcpu);
-		}
-	}
-
 	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -2580,8 +2586,6 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
 {
-	if (npt_enabled)
-		vcpu->fpu_active = 1;
 }
 
 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
@@ -2920,14 +2924,8 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (npt_enabled) {
-		/* hack: npt requires active fpu at this time */
-		vcpu->fpu_active = 1;
-		return;
-	}
-
+	update_cr0_intercept(svm);
 	svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
-	svm->vmcb->save.cr0 |= X86_CR0_TS;
 }
 
 static struct kvm_x86_ops svm_x86_ops = {
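
Note (not part of the patch): the heart of the change is update_cr0_intercept() above. CR0 reads and writes are only intercepted while the guest's view of CR0 and the value in the VMCB disagree, i.e. while the FPU is lazily deactivated and TS/MP are forced on in the hardware copy; once they agree, the guest may toggle CR0.TS itself without exits, and only "selective" CR0 writes (ones that change other bits) still exit via SVM_EXIT_CR0_SEL_WRITE. The following is a minimal standalone sketch of that decision, compiled as an ordinary user-space program; the vmcb_stub/vcpu_stub types, the *_sketch name and the mask values are illustrative stand-ins for the real KVM/SVM definitions, not the kernel's.

#include <stdio.h>
#include <stdint.h>

#define X86_CR0_MP		(1UL << 1)
#define X86_CR0_TS		(1UL << 3)
/* Stand-in for the kernel's SVM_CR0_SELECTIVE_MASK (the bits the selective
 * CR0 write intercept ignores); assumed here to be TS | MP. */
#define SVM_CR0_SELECTIVE_MASK	(X86_CR0_TS | X86_CR0_MP)
#define INTERCEPT_CR0_MASK	1U

struct vmcb_stub {			/* stand-in for vmcb->save / vmcb->control */
	uint64_t save_cr0;		/* CR0 the CPU uses while the guest runs */
	uint32_t intercept_cr_read;
	uint32_t intercept_cr_write;
};

struct vcpu_stub {			/* stand-in for kvm_vcpu */
	unsigned long cr0;		/* CR0 as the guest believes it to be */
	int fpu_active;			/* is the guest FPU state currently loaded? */
};

/* Mirrors update_cr0_intercept(): intercept CR0 only while the two views differ. */
static void update_cr0_intercept_sketch(struct vcpu_stub *vcpu, struct vmcb_stub *vmcb)
{
	unsigned long gcr0 = vcpu->cr0;
	uint64_t *hcr0 = &vmcb->save_cr0;

	if (!vcpu->fpu_active)
		/* FPU not loaded: force TS/MP so the first FPU insn raises #NM */
		*hcr0 |= SVM_CR0_SELECTIVE_MASK;
	else
		/* FPU loaded: let the guest's own TS/MP bits through */
		*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
			| (gcr0 & SVM_CR0_SELECTIVE_MASK);

	if (gcr0 == *hcr0 && vcpu->fpu_active) {
		/* Guest and hardware CR0 agree: stop trapping CR0 accesses. */
		vmcb->intercept_cr_read &= ~INTERCEPT_CR0_MASK;
		vmcb->intercept_cr_write &= ~INTERCEPT_CR0_MASK;
	} else {
		/* Views differ (FPU lazily off): keep the full CR0 intercepts. */
		vmcb->intercept_cr_read |= INTERCEPT_CR0_MASK;
		vmcb->intercept_cr_write |= INTERCEPT_CR0_MASK;
	}
}

int main(void)
{
	struct vmcb_stub vmcb = { .save_cr0 = 0x80010033 };	/* PG|WP|NE|ET|MP|PE */
	struct vcpu_stub vcpu = { .cr0 = 0x80010033, .fpu_active = 0 };

	update_cr0_intercept_sketch(&vcpu, &vmcb);
	printf("fpu inactive: hcr0=%#llx, cr0 write intercept=%u\n",
	       (unsigned long long)vmcb.save_cr0,
	       vmcb.intercept_cr_write & INTERCEPT_CR0_MASK);

	vcpu.fpu_active = 1;	/* as nm_interception() does after loading the FPU */
	update_cr0_intercept_sketch(&vcpu, &vmcb);
	printf("fpu active:   hcr0=%#llx, cr0 write intercept=%u\n",
	       (unsigned long long)vmcb.save_cr0,
	       vmcb.intercept_cr_write & INTERCEPT_CR0_MASK);
	return 0;
}

Run once with the FPU lazily deactivated and once after it is loaded: the first call forces TS/MP into the hardware CR0 and keeps the intercept, the second restores the guest's CR0 bits and drops it, which is exactly why svm_prepare_guest_switch() no longer needs to force fpu_active when npt is enabled.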