From: Avi Kivity
Date: Thu, 13 Sep 2012 12:47:54 +0300
To: Marcelo Tosatti
CC: kvm@vger.kernel.org, Xiao Guangrong
Subject: Re: [PATCH 6/5] KVM: MMU: Optimize is_last_gpte()
Message-ID: <5051ABCA.8080300@redhat.com>
References: <1347460194-11807-1-git-send-email-avi@redhat.com>
 <1347472187-14708-1-git-send-email-avi@redhat.com>
 <5050CE7C.20206@redhat.com>
In-Reply-To: <5050CE7C.20206@redhat.com>

On 09/12/2012 09:03 PM, Avi Kivity wrote:
> On 09/12/2012 08:49 PM, Avi Kivity wrote:
>> Instead of branchy code depending on level, gpte.ps, and mmu configuration,
>> prepare everything in a bitmap during mode changes and look it up during
>> runtime.
>
> 6/5 is buggy, sorry, will update it tomorrow.

--------8<----------8<--------------
From: Avi Kivity
Date: Wed, 12 Sep 2012 20:46:56 +0300
Subject: [PATCH v2 6/5] KVM: MMU: Optimize is_last_gpte()

Instead of branchy code depending on level, gpte.ps, and mmu configuration,
prepare everything in a bitmap during mode changes and look it up during
runtime.
Signed-off-by: Avi Kivity
Reviewed-by: Xiao Guangrong
---
v2: rearrange bitmap (one less shift)
    avoid stomping on local variable
    fix index calculation
    move check back to a function

 arch/x86/include/asm/kvm_host.h |  7 +++++++
 arch/x86/kvm/mmu.c              | 31 +++++++++++++++++++++++++++++++
 arch/x86/kvm/mmu.h              |  3 ++-
 arch/x86/kvm/paging_tmpl.h      | 22 +---------------------
 4 files changed, 41 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3318bde..f9a48cf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -298,6 +298,13 @@ struct kvm_mmu {
 	u64 *lm_root;
 	u64 rsvd_bits_mask[2][4];
 
+	/*
+	 * Bitmap: bit set = last pte in walk
+	 * index[0:1]: level (zero-based)
+	 * index[2]: pte.ps
+	 */
+	u8 last_pte_bitmap;
+
 	bool nx;
 
 	u64 pdptrs[4]; /* pae */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ce78408..32fe597 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3447,6 +3447,15 @@ static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
 	return access;
 }
 
+static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
+{
+	unsigned index;
+
+	index = level - 1;
+	index |= (gpte & PT_PAGE_SIZE_MASK) >> (PT_PAGE_SIZE_SHIFT - 2);
+	return mmu->last_pte_bitmap & (1 << index);
+}
+
 #define PTTYPE 64
 #include "paging_tmpl.h"
 #undef PTTYPE
@@ -3548,6 +3557,24 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
 	}
 }
 
+static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+{
+	u8 map;
+	unsigned level, root_level = mmu->root_level;
+	const unsigned ps_set_index = 1 << 2;  /* bit 2 of index: ps */
+
+	if (root_level == PT32E_ROOT_LEVEL)
+		--root_level;
+	/* PT_PAGE_TABLE_LEVEL always terminates */
+	map = 1 | (1 << ps_set_index);
+	for (level = PT_DIRECTORY_LEVEL; level <= root_level; ++level) {
+		if (level <= PT_PDPE_LEVEL
+		    && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu)))
+			map |= 1 << (ps_set_index | (level - 1));
+	}
+	mmu->last_pte_bitmap = map;
+}
+
 static int paging64_init_context_common(struct kvm_vcpu *vcpu,
 					struct kvm_mmu *context,
 					int level)
@@ -3557,6 +3584,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
 
 	reset_rsvds_bits_mask(vcpu, context);
 	update_permission_bitmask(vcpu, context);
+	update_last_pte_bitmap(vcpu, context);
 
 	ASSERT(is_pae(vcpu));
 	context->new_cr3 = paging_new_cr3;
@@ -3586,6 +3614,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
 
 	reset_rsvds_bits_mask(vcpu, context);
 	update_permission_bitmask(vcpu, context);
+	update_last_pte_bitmap(vcpu, context);
 
 	context->new_cr3 = paging_new_cr3;
 	context->page_fault = paging32_page_fault;
@@ -3647,6 +3676,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	}
 
 	update_permission_bitmask(vcpu, context);
+	update_last_pte_bitmap(vcpu, context);
 
 	return 0;
 }
@@ -3724,6 +3754,7 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 	}
 
 	update_permission_bitmask(vcpu, g_context);
+	update_last_pte_bitmap(vcpu, g_context);
 
 	return 0;
 }
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 143ee70..b08dd34 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -20,7 +20,8 @@
 #define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
 #define PT_DIRTY_SHIFT 6
 #define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
-#define PT_PAGE_SIZE_MASK (1ULL << 7)
+#define PT_PAGE_SIZE_SHIFT 7
+#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
 #define PT_PAT_MASK (1ULL << 7)
 #define PT_GLOBAL_MASK (1ULL << 8)
 #define PT64_NX_SHIFT 63
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index eb4a668..ec1e101 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -101,24 +101,6 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	return (ret != orig_pte);
 }
 
-static bool FNAME(is_last_gpte)(struct guest_walker *walker,
-				struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-				pt_element_t gpte)
-{
-	if (walker->level == PT_PAGE_TABLE_LEVEL)
-		return true;
-
-	if ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(gpte) &&
-	    (PTTYPE == 64 || is_pse(vcpu)))
-		return true;
-
-	if ((walker->level == PT_PDPE_LEVEL) && is_large_pte(gpte) &&
-	    (mmu->root_level == PT64_ROOT_LEVEL))
-		return true;
-
-	return false;
-}
-
 /*
  * Fetch a guest pte for a guest virtual address
  */
@@ -219,9 +201,7 @@ retry_walk:
 			mark_page_dirty(vcpu->kvm, table_gfn);
 			pte |= PT_ACCESSED_MASK;
 		}
 
 		walker->ptes[walker->level - 1] = pte;
-	} while (!FNAME(is_last_gpte)(walker, vcpu, mmu, pte));
+	} while (is_last_gpte(mmu, walker->level, pte));
 
 	if (unlikely(eperm)) {
 		errcode |= PFERR_PRESENT_MASK;
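
As an aside, for anyone who wants to poke at the encoding without
building a kernel, here is a minimal userspace sketch of the same
bitmap trick. The constants mirror the ones the patch uses (levels 1-4,
PS at bit 7 of the pte), but build_last_pte_bitmap() is a hypothetical
stand-in for update_last_pte_bitmap(): it takes a single large_pages
flag in place of the (mmu->root_level >= PT32E_ROOT_LEVEL ||
is_pse(vcpu)) check and skips the PT32E root-level adjustment, so treat
it as an illustration of the indexing scheme, not a drop-in replica.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed stand-ins for the kernel constants used by the patch. */
#define PT_PAGE_TABLE_LEVEL	1	/* 4k pte: always last */
#define PT_DIRECTORY_LEVEL	2	/* pde: last if PS (2M/4M page) */
#define PT_PDPE_LEVEL		3	/* pdpe: last if PS (1G page) */
#define PT_PAGE_SIZE_SHIFT	7
#define PT_PAGE_SIZE_MASK	(1ULL << PT_PAGE_SIZE_SHIFT)

/*
 * Index layout, as in the patch:
 *   index[0:1] = level - 1
 *   index[2]   = pte.ps
 */
static uint8_t build_last_pte_bitmap(unsigned root_level, bool large_pages)
{
	const unsigned ps_set_index = 1 << 2;	/* index value with ps set */
	uint8_t map;
	unsigned level;

	/* A level-1 pte terminates the walk whether or not bit 7 is set
	 * (there it is PAT, not PS), hence both variants of index 0. */
	map = 1 | (1 << ps_set_index);
	for (level = PT_DIRECTORY_LEVEL; level <= root_level; ++level)
		if (level <= PT_PDPE_LEVEL && large_pages)
			map |= 1 << (ps_set_index | (level - 1));
	return map;
}

/* Mirror of the patch's is_last_gpte(): one shift, one or, one test. */
static bool is_last_gpte(uint8_t bitmap, unsigned level, uint64_t gpte)
{
	unsigned index = level - 1;

	index |= (gpte & PT_PAGE_SIZE_MASK) >> (PT_PAGE_SIZE_SHIFT - 2);
	return bitmap & (1 << index);
}

int main(void)
{
	uint8_t map = build_last_pte_bitmap(4, true);	/* 64-bit paging */

	/* A level-2 pde with PS set (2M page) ends the walk... */
	printf("2M pde: %d\n", is_last_gpte(map, 2, PT_PAGE_SIZE_MASK));
	/* ...while the same pde without PS points to a page table. */
	printf("pde:    %d\n", is_last_gpte(map, 2, 0));
	/* A 4k pte is always last, even with bit 7 (PAT) set. */
	printf("4k pte: %d\n", is_last_gpte(map, 1, PT_PAGE_SIZE_MASK));
	return 0;
}

The v2 note "rearrange bitmap (one less shift)" is visible in
is_last_gpte() above: with the level in index bits 0:1, level - 1 needs
no shift at all, and only the PS bit has to move (from pte bit 7 down
to index bit 2) before the single test against the precomputed byte.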