
[v4,06/18] KVM: x86/mmu: Shrink split_shadow_page_cache via MMU shrinker

Message ID 20230306224127.1689967-7-vipinsh@google.com (mailing list archive)
State New, archived
Series NUMA aware page table allocation

Commit Message

Vipin Sharma March 6, 2023, 10:41 p.m. UTC
Use the MMU shrinker to free unused pages in split_shadow_page_cache.
Refactor the code and add a common function to try emptying a page cache.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 34 +++++++++++++++++++++-------------
 1 file changed, 21 insertions(+), 13 deletions(-)
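For context, mmu_shrink_scan() shown in the hunks below is the scan_objects
callback of KVM's MMU shrinker. A rough sketch of the pre-existing wiring in
mmu.c, illustrative rather than verbatim (the count_objects body is elided;
the shrinker name follows the upstream register_shrinker() API):

	static struct shrinker mmu_shrinker = {
		.count_objects = mmu_shrink_count, /* reports reclaimable pages; defined elsewhere in mmu.c */
		.scan_objects  = mmu_shrink_scan,  /* the callback this patch modifies */
		.seeks         = DEFAULT_SEEKS,
	};

	/* Registered at module init; the name appears in shrinker debugfs. */
	register_shrinker(&mmu_shrinker, "x86-kvm-mmu");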

Comments

Zhi Wang March 9, 2023, 4:01 p.m. UTC | #1
On Mon,  6 Mar 2023 14:41:15 -0800
Vipin Sharma <vipinsh@google.com> wrote:

> Use the MMU shrinker to free unused pages in split_shadow_page_cache.
> Refactor the code and add a common function to try emptying a page cache.
> 
> Signed-off-by: Vipin Sharma <vipinsh@google.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 34 +++++++++++++++++++++-------------
>  1 file changed, 21 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 0ebb8a2eaf47..73a0ac9c11ce 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -6696,13 +6696,24 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
>  	}
>  }
>  

Once the lock is moved into kvm_mmu_memory_cache, the cache_lock no longer
needs to be passed here or in mmu_shrink_scan().

> +static int mmu_memory_cache_try_empty(struct kvm_mmu_memory_cache *cache,
> +				      struct mutex *cache_lock)
> +{
> +	int freed = 0;
> +
> +	if (mutex_trylock(cache_lock)) {
> +		freed = cache->nobjs;
> +		kvm_mmu_empty_memory_cache(cache);
> +		mutex_unlock(cache_lock);
> +	}
> +	return freed;
> +}
> +
>  static unsigned long mmu_shrink_scan(struct shrinker *shrink,
>  				     struct shrink_control *sc)
>  {
>  	struct kvm *kvm, *next_kvm, *first_kvm = NULL;
> -	struct kvm_mmu_memory_cache *cache;
>  	unsigned long i, freed = 0;
> -	struct mutex *cache_lock;
>  	struct kvm_vcpu *vcpu;
>  
>  	mutex_lock(&kvm_lock);
> @@ -6716,18 +6727,15 @@ static unsigned long mmu_shrink_scan(struct shrinker *shrink,
>  		list_move_tail(&kvm->vm_list, &vm_list);
>  
>  		kvm_for_each_vcpu(i, vcpu, kvm) {
> -			cache = &vcpu->arch.mmu_shadow_page_cache;
> -			cache_lock = &vcpu->arch.mmu_shadow_page_cache_lock;
> -			if (mutex_trylock(cache_lock)) {
> -				if (cache->nobjs) {
> -					freed += cache->nobjs;
> -					kvm_mmu_empty_memory_cache(cache);
> -				}
> -				mutex_unlock(cache_lock);
> -				if (freed >= sc->nr_to_scan)
> -					goto out;
> -			}
> +			freed += mmu_memory_cache_try_empty(&vcpu->arch.mmu_shadow_page_cache,
> +							    &vcpu->arch.mmu_shadow_page_cache_lock);
> +			if (freed >= sc->nr_to_scan)
> +				goto out;
>  		}
> +		freed += mmu_memory_cache_try_empty(&kvm->arch.split_shadow_page_cache,
> +						    &kvm->slots_lock);
> +		if (freed >= sc->nr_to_scan)
> +			goto out;
>  	}
>  out:
>  	mutex_unlock(&kvm_lock);

Vipin Sharma March 9, 2023, 7:59 p.m. UTC | #2
On Thu, Mar 9, 2023 at 8:01 AM Zhi Wang <zhi.wang.linux@gmail.com> wrote:
>
> On Mon,  6 Mar 2023 14:41:15 -0800
> Vipin Sharma <vipinsh@google.com> wrote:
>
> [...]
>
> Once the lock is moved into kvm_mmu_memory_cache, the cache_lock no longer
> needs to be passed here or in mmu_shrink_scan().
>
Agreed. Let's see what the decision is on moving the lock inside the cache.
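For reference, a minimal sketch of the suggested refactor, assuming the mutex
were embedded in struct kvm_mmu_memory_cache (the "lock" field is hypothetical
and not part of the posted series; the split cache, which currently reuses
kvm->slots_lock, would also need its own embedded lock for this to work):

	static int mmu_memory_cache_try_empty(struct kvm_mmu_memory_cache *cache)
	{
		int freed = 0;

		/* The lock lives in the cache itself, so no separate argument. */
		if (mutex_trylock(&cache->lock)) {
			freed = cache->nobjs;
			kvm_mmu_empty_memory_cache(cache);
			mutex_unlock(&cache->lock);
		}
		return freed;
	}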

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0ebb8a2eaf47..73a0ac9c11ce 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6696,13 +6696,24 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 	}
 }
 
+static int mmu_memory_cache_try_empty(struct kvm_mmu_memory_cache *cache,
+				      struct mutex *cache_lock)
+{
+	int freed = 0;
+
+	if (mutex_trylock(cache_lock)) {
+		freed = cache->nobjs;
+		kvm_mmu_empty_memory_cache(cache);
+		mutex_unlock(cache_lock);
+	}
+	return freed;
+}
+
 static unsigned long mmu_shrink_scan(struct shrinker *shrink,
 				     struct shrink_control *sc)
 {
 	struct kvm *kvm, *next_kvm, *first_kvm = NULL;
-	struct kvm_mmu_memory_cache *cache;
 	unsigned long i, freed = 0;
-	struct mutex *cache_lock;
 	struct kvm_vcpu *vcpu;
 
 	mutex_lock(&kvm_lock);
@@ -6716,18 +6727,15 @@ static unsigned long mmu_shrink_scan(struct shrinker *shrink,
 		list_move_tail(&kvm->vm_list, &vm_list);
 
 		kvm_for_each_vcpu(i, vcpu, kvm) {
-			cache = &vcpu->arch.mmu_shadow_page_cache;
-			cache_lock = &vcpu->arch.mmu_shadow_page_cache_lock;
-			if (mutex_trylock(cache_lock)) {
-				if (cache->nobjs) {
-					freed += cache->nobjs;
-					kvm_mmu_empty_memory_cache(cache);
-				}
-				mutex_unlock(cache_lock);
-				if (freed >= sc->nr_to_scan)
-					goto out;
-			}
+			freed += mmu_memory_cache_try_empty(&vcpu->arch.mmu_shadow_page_cache,
+							    &vcpu->arch.mmu_shadow_page_cache_lock);
+			if (freed >= sc->nr_to_scan)
+				goto out;
 		}
+		freed += mmu_memory_cache_try_empty(&kvm->arch.split_shadow_page_cache,
+						    &kvm->slots_lock);
+		if (freed >= sc->nr_to_scan)
+			goto out;
 	}
 out:
 	mutex_unlock(&kvm_lock);
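Two notes on the code above. mmu_shrink_scan() uses mutex_trylock() rather than
mutex_lock() because the shrinker runs in memory-reclaim context: blocking
behind a vCPU that holds its cache lock could stall reclaim, so a busy cache is
simply skipped. And kvm_mmu_empty_memory_cache(), introduced earlier in this
series, is not visible in this diff; based on the generic
kvm_mmu_free_memory_cache() in virt/kvm/kvm_main.c, a plausible shape is:

	void kvm_mmu_empty_memory_cache(struct kvm_mmu_memory_cache *mc)
	{
		/* Free every cached object but keep the objects[] array allocated. */
		while (mc->nobjs) {
			if (mc->kmem_cache)
				kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
			else
				free_page((unsigned long)mc->objects[--mc->nobjs]);
		}
	}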