diff mbox series

[v4,08/18] KVM: x86/mmu: Track unused mmu_shadowed_info_cache pages count via global counter

Message ID 20230306224127.1689967-9-vipinsh@google.com (mailing list archive)
State New, archived
Headers show
Series NUMA aware page table allocation | expand

Commit Message

Vipin Sharma March 6, 2023, 10:41 p.m. UTC
Add unused pages in mmu_shadowed_info_cache to the global MMU unused page
cache counter, i.e. kvm_total_unused_cached_pages. These pages will be
freed by the MMU shrinker in a future commit.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/x86/include/asm/kvm_host.h | 3 ++-
 arch/x86/kvm/mmu/mmu.c          | 8 ++++----
 2 files changed, 6 insertions(+), 5 deletions(-)

Comments

Yang, Weijiang March 30, 2023, 4:53 a.m. UTC | #1
On 3/7/2023 6:41 AM, Vipin Sharma wrote:
> Add unused pages in mmu_shadowed_info_cache to global MMU unused page
> cache counter i.e. kvm_total_unused_cached_pages. These pages will be
> freed by MMU shrinker in future commit.

This patch mainly renames some functions, but the commit log doesn't
reflect what this patch does. Please change the commit log or squash the
patch.


>
> Signed-off-by: Vipin Sharma <vipinsh@google.com>
> ---
>   arch/x86/include/asm/kvm_host.h | 3 ++-
>   arch/x86/kvm/mmu/mmu.c          | 8 ++++----
>   2 files changed, 6 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 4322c7020d5d..185719dbeb81 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -792,7 +792,8 @@ struct kvm_vcpu_arch {
>   	struct kvm_mmu_memory_cache mmu_page_header_cache;
>   
>   	/*
> -	 * Protect allocation and release of pages from mmu_shadow_page_cache.
> +	 * Protect allocation and release of pages from mmu_shadow_page_cache
> +	 * and mmu_shadowed_info_cache.
>   	 */
>   	struct mutex mmu_shadow_page_cache_lock;
>   
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 0a0962d8108b..b7ca31b5699c 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -715,8 +715,8 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
>   		return r;
>   
>   	if (maybe_indirect) {
> -		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
> -					       PT64_ROOT_MAX_LEVEL);
> +		r = mmu_topup_sp_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
> +					      PT64_ROOT_MAX_LEVEL);
>   		if (r)
>   			return r;
>   	}
> @@ -729,8 +729,8 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
>   	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
>   	mutex_lock(&vcpu->arch.mmu_shadow_page_cache_lock);
>   	mmu_free_sp_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
> +	mmu_free_sp_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
>   	mutex_unlock(&vcpu->arch.mmu_shadow_page_cache_lock);
> -	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
>   	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
>   }
>   
> @@ -2197,7 +2197,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
>   	sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
>   	sp->spt = mmu_sp_memory_cache_alloc(caches->shadow_page_cache);
>   	if (!role.direct)
> -		sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
> +		sp->shadowed_translation = mmu_sp_memory_cache_alloc(caches->shadowed_info_cache);
>   
>   	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
>
Vipin Sharma April 3, 2023, 11:02 p.m. UTC | #2
On Wed, Mar 29, 2023 at 9:53 PM Yang, Weijiang <weijiang.yang@intel.com> wrote:
>
>
> On 3/7/2023 6:41 AM, Vipin Sharma wrote:
> > Add unused pages in mmu_shadowed_info_cache to global MMU unused page
> > cache counter i.e. kvm_total_unused_cached_pages. These pages will be
> > freed by MMU shrinker in future commit.
>
> This patch mainly renames some functions,  but the commit log doesn't
> reflect what
>
> this patch does. Please change the commit log or squash the patch.
>
>

This is not just function renaming; it uses a function that also does
page accounting. I will expand the commit log to capture more details
instead of squashing the patch.

Thanks
diff mbox series

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4322c7020d5d..185719dbeb81 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -792,7 +792,8 @@  struct kvm_vcpu_arch {
 	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
 	/*
-	 * Protect allocation and release of pages from mmu_shadow_page_cache.
+	 * Protect allocation and release of pages from mmu_shadow_page_cache
+	 * and mmu_shadowed_info_cache.
 	 */
 	struct mutex mmu_shadow_page_cache_lock;
 
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0a0962d8108b..b7ca31b5699c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -715,8 +715,8 @@  static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 		return r;
 
 	if (maybe_indirect) {
-		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
-					       PT64_ROOT_MAX_LEVEL);
+		r = mmu_topup_sp_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
+					      PT64_ROOT_MAX_LEVEL);
 		if (r)
 			return r;
 	}
@@ -729,8 +729,8 @@  static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
 	mutex_lock(&vcpu->arch.mmu_shadow_page_cache_lock);
 	mmu_free_sp_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
+	mmu_free_sp_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
 	mutex_unlock(&vcpu->arch.mmu_shadow_page_cache_lock);
-	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
@@ -2197,7 +2197,7 @@  static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
 	sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
 	sp->spt = mmu_sp_memory_cache_alloc(caches->shadow_page_cache);
 	if (!role.direct)
-		sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
+		sp->shadowed_translation = mmu_sp_memory_cache_alloc(caches->shadowed_info_cache);
 
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);