
KVM: x86/mmu: Only allocate shadowed translation cache for sp->role.level <= KVM_MAX_HUGEPAGE_LEVEL

Message ID bf08a06675ed97d2456f7dcd9b0b2ef92f4602be.1715137643.git.houwenlong.hwl@antgroup.com

Commit Message

Hou Wenlong May 8, 2024, 3:18 a.m. UTC
Only an indirect SP with sp->role.level <= KVM_MAX_HUGEPAGE_LEVEL might
have leaf gptes, so the shadowed translation cache needs to be
allocated only for such SPs. Additionally, use sp->shadowed_translation
to determine whether the information in the shadowed translation cache
should be used.

Suggested-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
---
 arch/x86/kvm/mmu/mmu.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

Comments

Sean Christopherson May 8, 2024, 2:55 p.m. UTC | #1
On Wed, May 08, 2024, Hou Wenlong wrote:
> Only an indirect SP with sp->role.level <= KVM_MAX_HUGEPAGE_LEVEL might
> have leaf gptes, so the shadowed translation cache needs to be
> allocated only for such SPs. Additionally, use sp->shadowed_translation
> to determine whether the information in the shadowed translation cache
> should be used.
> 
> Suggested-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 27 +++++++++++++++++++--------
>  1 file changed, 19 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index fc3b59b59ee1..8be987d0f05e 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -716,12 +716,12 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp);
>  
>  static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
>  {
> +	if (sp->shadowed_translation)
> +		return sp->shadowed_translation[index] >> PAGE_SHIFT;
> +
>  	if (sp->role.passthrough)
>  		return sp->gfn;
>  
> -	if (!sp->role.direct)
> -		return sp->shadowed_translation[index] >> PAGE_SHIFT;

Why change the order?  Either the order doesn't matter, in which case go for the
smallest diff, or the order does matter, in which case this warrants an explanation
in the changelog (or perhaps even a separate patch, but that's likely overkill).

> -
>  	return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
>  }
>  
> @@ -733,7 +733,7 @@ static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
>   */
>  static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)

Can you also extend the WARN in FNAME(sync_spte)()?  Partly to help communicate
to future readers that sync_spte() operates on leaf GPTEs, and also to help make
it somewhat obvious that this patch doesn't break shadow_mmu_get_sp_for_split()

diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 7a87097cb45b..89b5d73e9e3c 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -911,7 +911,7 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int
        gpa_t pte_gpa;
        gfn_t gfn;
 
-       if (WARN_ON_ONCE(!sp->spt[i]))
+       if (WARN_ON_ONCE(!sp->spt[i] || !sp->shadowed_translation))
                return 0;
 
        first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

>  {
> -	if (sp_has_gptes(sp))
> +	if (sp->shadowed_translation)
>  		return sp->shadowed_translation[index] & ACC_ALL;
>  
>  	/*
> @@ -754,7 +754,7 @@ static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
>  static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
>  					 gfn_t gfn, unsigned int access)
>  {
> -	if (sp_has_gptes(sp)) {
> +	if (sp->shadowed_translation) {
>  		sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
>  		return;
>  	}
> @@ -1697,7 +1697,7 @@ static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
>  	hlist_del(&sp->hash_link);
>  	list_del(&sp->link);
>  	free_page((unsigned long)sp->spt);
> -	if (!sp->role.direct)
> +	if (sp->shadowed_translation)
>  		free_page((unsigned long)sp->shadowed_translation);

Just drop the manual check, free_page() already handles the NULL/0 case.
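
I.e. the tail of kvm_mmu_free_shadow_page() would presumably collapse to
something like this (untested sketch, same code as above minus the check):

	free_page((unsigned long)sp->spt);
	/* free_page() is a no-op for a NULL/0 address, no check needed. */
	free_page((unsigned long)sp->shadowed_translation);
	kmem_cache_free(mmu_page_header_cache, sp);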

>  	kmem_cache_free(mmu_page_header_cache, sp);
>  }
> @@ -1850,6 +1850,17 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
>  static void kvm_mmu_commit_zap_page(struct kvm *kvm,
>  				    struct list_head *invalid_list);
>  
> +static bool sp_might_have_leaf_gptes(struct kvm_mmu_page *sp)

Hmm, I think I'd prefer to forego the helper entirely, as this really is intended
to be used only when allocating the shadow page.  That way we avoid having to try
and clarify exactly what "might" means in this context, as well as potential future
goofs, e.g. if someone had the clever idea to check sp->shadowed_translation.

Oof, yeah, definitely omit the helper.  It took me a minute to fully appreciate
the difference between "leaf gptes" and "gptes" as it relates to write-protecting
gfns.
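
To spell out the distinction for future readers: every indirect,
non-passthrough SP has gptes and so needs the write-protection accounting
keyed off sp_has_gptes(), but only an indirect SP at or below
KVM_MAX_HUGEPAGE_LEVEL might contain *leaf* gptes and so need
sp->shadowed_translation.  Roughly (pseudo-code, not the exact source):

	/* has gptes => needs write-protection accounting */
	!sp->role.direct && !sp->role.passthrough

	/* might have leaf gptes => needs sp->shadowed_translation */
	!sp->role.direct && sp->role.level <= KVM_MAX_HUGEPAGE_LEVEL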

> +{
> +	if (sp->role.direct)
> +		return false;
> +
> +	if (sp->role.level > KVM_MAX_HUGEPAGE_LEVEL)
> +		return false;
> +
> +	return true;
> +}
> +
>  static bool sp_has_gptes(struct kvm_mmu_page *sp)
>  {
>  	if (sp->role.direct)
> @@ -2199,7 +2210,8 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
>  
>  	sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
>  	sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
> -	if (!role.direct)
> +	sp->role = role;
> +	if (sp_might_have_leaf_gptes(sp))

And then this becomes:

	if (!role.direct && role.level <= KVM_MAX_HUGEPAGE_LEVEL)

>  		sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
>  
>  	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
> @@ -2216,7 +2228,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
>  	kvm_account_mmu_page(kvm, sp);
>  
>  	sp->gfn = gfn;
> -	sp->role = role;

And this code doesn't need to move.
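
Put together, the allocation side would presumably end up as (untested):

	sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
	if (!role.direct && role.level <= KVM_MAX_HUGEPAGE_LEVEL)
		sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);

with "sp->role = role;" left exactly where it is today.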

>  	hlist_add_head(&sp->hash_link, sp_list);
>  	if (sp_has_gptes(sp))
>  		account_shadowed(kvm, sp);
> -- 
> 2.31.1
>
Hou Wenlong May 9, 2024, 2:48 a.m. UTC | #2
On Wed, May 08, 2024 at 10:55:02PM +0800, Sean Christopherson wrote:
> On Wed, May 08, 2024, Hou Wenlong wrote:
> > Only an indirect SP with sp->role.level <= KVM_MAX_HUGEPAGE_LEVEL might
> > have leaf gptes, so the shadowed translation cache needs to be
> > allocated only for such SPs. Additionally, use sp->shadowed_translation
> > to determine whether the information in the shadowed translation cache
> > should be used.
> > 
> > Suggested-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> > Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
> > ---
> >  arch/x86/kvm/mmu/mmu.c | 27 +++++++++++++++++++--------
> >  1 file changed, 19 insertions(+), 8 deletions(-)
> > 
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index fc3b59b59ee1..8be987d0f05e 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -716,12 +716,12 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp);
> >  
> >  static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
> >  {
> > +	if (sp->shadowed_translation)
> > +		return sp->shadowed_translation[index] >> PAGE_SHIFT;
> > +
> >  	if (sp->role.passthrough)
> >  		return sp->gfn;
> >  
> > -	if (!sp->role.direct)
> > -		return sp->shadowed_translation[index] >> PAGE_SHIFT;
> 
> Why change the order?  Either the order doesn't matter, in which case go for the
> smallest diff, or the order does matter, in which case this warrants an explanation
> in the changelog (or perhaps even a separate patch, but that's likely overkill).
>
I believe the order doesn't matter; the initial intent was just to keep
the two cases that don't use shadowed_translation close together. I'll
drop the reordering to keep the diff minimal.
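
I.e. the v2 hunk would then just flip the condition in place, something
like:

 	if (sp->role.passthrough)
 		return sp->gfn;
 
-	if (!sp->role.direct)
+	if (sp->shadowed_translation)
 		return sp->shadowed_translation[index] >> PAGE_SHIFT;
 
 	return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));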

> > -
> >  	return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
> >  }
> >  
> > @@ -733,7 +733,7 @@ static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
> >   */
> >  static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
> 
> Can you also extend the WARN in FNAME(sync_spte)()?  Partly to help communicate
> to future readers that sync_spte() operates on leaf GPTEs, and also to help make
> it somewhat obvious that this patch doesn't break shadow_mmu_get_sp_for_split()
> 
> diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
> index 7a87097cb45b..89b5d73e9e3c 100644
> --- a/arch/x86/kvm/mmu/paging_tmpl.h
> +++ b/arch/x86/kvm/mmu/paging_tmpl.h
> @@ -911,7 +911,7 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int
>         gpa_t pte_gpa;
>         gfn_t gfn;
>  
> -       if (WARN_ON_ONCE(!sp->spt[i]))
> +       if (WARN_ON_ONCE(!sp->spt[i] || !sp->shadowed_translation))
>                 return 0;
>  
>         first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
>
Sure, I'll add this in the v2.
 
Thanks!

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index fc3b59b59ee1..8be987d0f05e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -716,12 +716,12 @@  static bool sp_has_gptes(struct kvm_mmu_page *sp);
 
 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
 {
+	if (sp->shadowed_translation)
+		return sp->shadowed_translation[index] >> PAGE_SHIFT;
+
 	if (sp->role.passthrough)
 		return sp->gfn;
 
-	if (!sp->role.direct)
-		return sp->shadowed_translation[index] >> PAGE_SHIFT;
-
 	return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
 }
 
@@ -733,7 +733,7 @@  static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
  */
 static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
 {
-	if (sp_has_gptes(sp))
+	if (sp->shadowed_translation)
 		return sp->shadowed_translation[index] & ACC_ALL;
 
 	/*
@@ -754,7 +754,7 @@  static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
 static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
 					 gfn_t gfn, unsigned int access)
 {
-	if (sp_has_gptes(sp)) {
+	if (sp->shadowed_translation) {
 		sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
 		return;
 	}
@@ -1697,7 +1697,7 @@  static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
 	hlist_del(&sp->hash_link);
 	list_del(&sp->link);
 	free_page((unsigned long)sp->spt);
-	if (!sp->role.direct)
+	if (sp->shadowed_translation)
 		free_page((unsigned long)sp->shadowed_translation);
 	kmem_cache_free(mmu_page_header_cache, sp);
 }
@@ -1850,6 +1850,17 @@  static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
+static bool sp_might_have_leaf_gptes(struct kvm_mmu_page *sp)
+{
+	if (sp->role.direct)
+		return false;
+
+	if (sp->role.level > KVM_MAX_HUGEPAGE_LEVEL)
+		return false;
+
+	return true;
+}
+
 static bool sp_has_gptes(struct kvm_mmu_page *sp)
 {
 	if (sp->role.direct)
@@ -2199,7 +2210,8 @@  static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
 
 	sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
 	sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
-	if (!role.direct)
+	sp->role = role;
+	if (sp_might_have_leaf_gptes(sp))
 		sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
 
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
@@ -2216,7 +2228,6 @@  static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
 	kvm_account_mmu_page(kvm, sp);
 
 	sp->gfn = gfn;
-	sp->role = role;
 	hlist_add_head(&sp->hash_link, sp_list);
 	if (sp_has_gptes(sp))
 		account_shadowed(kvm, sp);