From patchwork Mon Sep  6 15:55:59 2010
From: Joerg Roedel <joerg.roedel@amd.com>
X-Patchwork-Submitter: Joerg Roedel
X-Patchwork-Id: 159171
To: Avi Kivity, Marcelo Tosatti
CC: Alexander Graf, kvm@vger.kernel.org, Joerg Roedel
Subject: [PATCH 20/27] KVM: MMU: Add kvm_mmu parameter to load_pdptrs function
Date: Mon, 6 Sep 2010 17:55:59 +0200
Message-ID: <1283788566-29186-21-git-send-email-joerg.roedel@amd.com>
X-Mailer: git-send-email 1.7.0.4
In-Reply-To: <1283788566-29186-1-git-send-email-joerg.roedel@amd.com>
References: <1283788566-29186-1-git-send-email-joerg.roedel@amd.com>
List-ID: kvm@vger.kernel.org

This function needs to be able to load the pdptrs from any MMU context
currently in use, so change it to take a kvm_mmu parameter to fit these
needs. As a side effect, this patch also moves the cached pdptrs from
vcpu_arch into the kvm_mmu struct.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
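An illustration for review, kept below the '---' cut so it stays out of
the commit: a sketch of the new calling convention. The helper name
refresh_guest_pdptrs is made up for this example; load_pdptrs(),
walk_mmu and arch.cr3 are taken from the patch itself.

	/*
	 * Example: reload the cached PDPTEs of the mmu context that
	 * walks the guest's page tables.  load_pdptrs() reads the four
	 * PDPTEs from the page pointed to by cr3 through @mmu and, on
	 * success, caches them in mmu->pdptrs[].
	 */
	static int refresh_guest_pdptrs(struct kvm_vcpu *vcpu)
	{
		struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

		return load_pdptrs(vcpu, mmu, vcpu->arch.cr3);
	}
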
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 173834b..1080c0f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -259,6 +259,8 @@ struct kvm_mmu {
 
 	u64 *pae_root;
 	u64 rsvd_bits_mask[2][4];
+
+	u64 pdptrs[4]; /* pae */
 };
 
 struct kvm_vcpu_arch {
@@ -278,7 +280,6 @@ struct kvm_vcpu_arch {
 	unsigned long cr4_guest_owned_bits;
 	unsigned long cr8;
 	u32 hflags;
-	u64 pdptrs[4]; /* pae */
 	u64 efer;
 	u64 apic_base;
 	struct kvm_lapic *apic;    /* kernel irqchip context */
@@ -594,7 +595,7 @@ void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
+int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			const void *val, int bytes);
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 6491ac8..a37abe2 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -42,7 +42,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 		      (unsigned long *)&vcpu->arch.regs_avail))
 		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
 
-	return vcpu->arch.pdptrs[index];
+	return vcpu->arch.walk_mmu->pdptrs[index];
 }
 
 static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 094df31..a98ac52 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1010,7 +1010,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 	switch (reg) {
 	case VCPU_EXREG_PDPTR:
 		BUG_ON(!npt_enabled);
-		load_pdptrs(vcpu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
 		break;
 	default:
 		BUG();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0e62d8a..0a70194 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1848,20 +1848,20 @@ static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 		return;
 
 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
-		vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
-		vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]);
-		vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]);
+		vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
+		vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
+		vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
+		vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
 	}
 }
 
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 {
 	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-		vcpu->arch.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
-		vcpu->arch.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
-		vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
-		vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
+		vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
+		vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
+		vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
+		vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
 	}
 
 	__set_bit(VCPU_EXREG_PDPTR,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ca69dcc..337f59f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -418,17 +418,17 @@ int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
 /*
  * Load the pae pdptrs.  Return true is they are all valid.
  */
-int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
+int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
 {
 	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
 	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
 	int i, error;
 	int ret;
-	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
+	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
 
-	ret = kvm_read_nested_guest_page(vcpu, pdpt_gfn, pdpte,
-					 offset * sizeof(u64),
-					 sizeof(pdpte), &error);
+	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
+				      offset * sizeof(u64),
+				      sizeof(pdpte), &error);
 	if (ret < 0) {
 		ret = 0;
 		goto out;
@@ -442,7 +442,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	}
 	ret = 1;
 
-	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
+	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
 	__set_bit(VCPU_EXREG_PDPTR,
 		  (unsigned long *)&vcpu->arch.regs_avail);
 	__set_bit(VCPU_EXREG_PDPTR,
@@ -455,7 +455,7 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
 
 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 {
-	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
+	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
 	bool changed = true;
 	int offset;
 	u32 error;
@@ -475,7 +475,7 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 			       sizeof(pdpte), &error);
 	if (r < 0)
 		goto out;
-	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
+	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
 out:
 
 	return changed;
@@ -514,7 +514,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 			return 1;
 		} else
 #endif
-		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3))
+		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
+						 vcpu->arch.cr3))
 			return 1;
 	}
 
@@ -603,7 +604,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			return 1;
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & pdptr_bits)
-		   && !load_pdptrs(vcpu, vcpu->arch.cr3))
+		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
 		return 1;
 
 	if (cr4 & X86_CR4_VMXE)
@@ -636,7 +637,8 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	if (is_pae(vcpu)) {
 		if (cr3 & CR3_PAE_RESERVED_BITS)
 			return 1;
-		if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3))
+		if (is_paging(vcpu) &&
+		    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
 			return 1;
 	}
 	/*
@@ -5412,7 +5414,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
-		load_pdptrs(vcpu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
 		mmu_reset_needed = 1;
 	}
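
For reference: kvm_read_guest_page_mmu(), which replaces
kvm_read_nested_guest_page() in load_pdptrs() above, is introduced by
an earlier patch in this series. Inferred from the call site only --
the parameter names are guesses, not copied from that patch -- its
prototype should look roughly like this:

	/*
	 * Read @len bytes at @offset within guest page @gfn into @data,
	 * translating the access through @mmu; *@error receives the
	 * fault error code on failure.  Prototype inferred from the
	 * call site above.
	 */
	int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu,
				    struct kvm_mmu *mmu, gfn_t gfn,
				    void *data, int offset, int len,
				    u32 *error);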