[11/21] KVM: x86/MMU: Cleanup shrinker interface with Shadow MMU

Message ID 20230202182809.1929122-12-bgardon@google.com (mailing list archive)
State New, archived
Series KVM: x86/MMU: Formalize the Shadow MMU

Commit Message

Ben Gardon Feb. 2, 2023, 6:27 p.m. UTC
The MMU shrinker currently only operates on the Shadow MMU, but having
the entire implementation in shadow_mmu.c is awkward since much of the
functionality isn't Shadow MMU specific. There has also been talk of
changing the target of the shrinker to the MMU caches rather than
already-allocated page tables. As a result, it makes sense to move some
of the implementation back to mmu.c.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c        | 43 ++++++++++++++++++++++++
 arch/x86/kvm/mmu/shadow_mmu.c | 62 ++++++++---------------------------
 arch/x86/kvm/mmu/shadow_mmu.h |  3 +-
 3 files changed, 58 insertions(+), 50 deletions(-)

Comments

kernel test robot Feb. 4, 2023, 1:52 p.m. UTC | #1
Hi Ben,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on 7cb79f433e75b05d1635aefaa851cfcd1cb7dc4f]

url:    https://github.com/intel-lab-lkp/linux/commits/Ben-Gardon/KVM-x86-mmu-Rename-slot-rmap-walkers-to-add-clarity-and-clean-up-code/20230203-023259
base:   7cb79f433e75b05d1635aefaa851cfcd1cb7dc4f
patch link:    https://lore.kernel.org/r/20230202182809.1929122-12-bgardon%40google.com
patch subject: [PATCH 11/21] KVM: x86/MMU: Cleanup shrinker interface with Shadow MMU
config: x86_64-randconfig-a002 (https://download.01.org/0day-ci/archive/20230204/202302042127.gbKNDVaL-lkp@intel.com/config)
compiler: gcc-11 (Debian 11.3.0-8) 11.3.0
reproduce (this is a W=1 build):
        # https://github.com/intel-lab-lkp/linux/commit/c1170de906dfe1ee64da0066e7c28d35e716ed18
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Ben-Gardon/KVM-x86-mmu-Rename-slot-rmap-walkers-to-add-clarity-and-clean-up-code/20230203-023259
        git checkout c1170de906dfe1ee64da0066e7c28d35e716ed18
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        make W=1 O=build_dir ARCH=x86_64 olddefconfig
        make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash arch/x86/kvm/

If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> arch/x86/kvm/mmu/mmu.c:3148:15: warning: no previous prototype for 'mmu_shrink_scan' [-Wmissing-prototypes]
    3148 | unsigned long mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
         |               ^~~~~~~~~~~~~~~


vim +/mmu_shrink_scan +3148 arch/x86/kvm/mmu/mmu.c

  3147	
> 3148	unsigned long mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
  3149	{
  3150		struct kvm *kvm;
  3151		int nr_to_scan = sc->nr_to_scan;
  3152		unsigned long freed = 0;
  3153	
  3154		mutex_lock(&kvm_lock);
  3155	
  3156		list_for_each_entry(kvm, &vm_list, vm_list) {
  3157			/*
  3158			 * Never scan more than sc->nr_to_scan VM instances.
  3159			 * Will not hit this condition practically since we do not try
  3160			 * to shrink more than one VM and it is very unlikely to see
  3161			 * !n_used_mmu_pages so many times.
  3162			 */
  3163			if (!nr_to_scan--)
  3164				break;
  3165	
  3166			/*
  3167			 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
  3168			 * here. We may skip a VM instance errorneosly, but we do not
  3169			 * want to shrink a VM that only started to populate its MMU
  3170			 * anyway.
  3171			 */
  3172			if (!kvm->arch.n_used_mmu_pages &&
  3173			    !kvm_shadow_mmu_has_zapped_obsolete_pages(kvm))
  3174				continue;
  3175	
  3176			freed = kvm_shadow_mmu_shrink_scan(kvm, sc->nr_to_scan);
  3177	
  3178			/*
  3179			 * unfair on small ones
  3180			 * per-vm shrinkers cry out
  3181			 * sadness comes quickly
  3182			 */
  3183			list_move_tail(&kvm->vm_list, &vm_list);
  3184			break;
  3185		}
  3186	
  3187		mutex_unlock(&kvm_lock);
  3188		return freed;
  3189	}
  3190
kernel test robot Feb. 4, 2023, 7:10 p.m. UTC | #2
Hi Ben,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on 7cb79f433e75b05d1635aefaa851cfcd1cb7dc4f]

url:    https://github.com/intel-lab-lkp/linux/commits/Ben-Gardon/KVM-x86-mmu-Rename-slot-rmap-walkers-to-add-clarity-and-clean-up-code/20230203-023259
base:   7cb79f433e75b05d1635aefaa851cfcd1cb7dc4f
patch link:    https://lore.kernel.org/r/20230202182809.1929122-12-bgardon%40google.com
patch subject: [PATCH 11/21] KVM: x86/MMU: Cleanup shrinker interface with Shadow MMU
config: x86_64-randconfig-a014 (https://download.01.org/0day-ci/archive/20230205/202302050303.kLjMucYK-lkp@intel.com/config)
compiler: clang version 14.0.6 (https://github.com/llvm/llvm-project f28c006a5895fc0e329fe15fead81e37457cb1d1)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/c1170de906dfe1ee64da0066e7c28d35e716ed18
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Ben-Gardon/KVM-x86-mmu-Rename-slot-rmap-walkers-to-add-clarity-and-clean-up-code/20230203-023259
        git checkout c1170de906dfe1ee64da0066e7c28d35e716ed18
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=x86_64 olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash arch/x86/kvm/

If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> arch/x86/kvm/mmu/mmu.c:3148:15: warning: no previous prototype for function 'mmu_shrink_scan' [-Wmissing-prototypes]
   unsigned long mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                 ^
   arch/x86/kvm/mmu/mmu.c:3148:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   unsigned long mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
   ^
   static 
   1 warning generated.


vim +/mmu_shrink_scan +3148 arch/x86/kvm/mmu/mmu.c

  3147	
> 3148	unsigned long mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
  3149	{
  3150		struct kvm *kvm;
  3151		int nr_to_scan = sc->nr_to_scan;
  3152		unsigned long freed = 0;
  3153	
  3154		mutex_lock(&kvm_lock);
  3155	
  3156		list_for_each_entry(kvm, &vm_list, vm_list) {
  3157			/*
  3158			 * Never scan more than sc->nr_to_scan VM instances.
  3159			 * Will not hit this condition practically since we do not try
  3160			 * to shrink more than one VM and it is very unlikely to see
  3161			 * !n_used_mmu_pages so many times.
  3162			 */
  3163			if (!nr_to_scan--)
  3164				break;
  3165	
  3166			/*
  3167			 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
  3168			 * here. We may skip a VM instance errorneosly, but we do not
  3169			 * want to shrink a VM that only started to populate its MMU
  3170			 * anyway.
  3171			 */
  3172			if (!kvm->arch.n_used_mmu_pages &&
  3173			    !kvm_shadow_mmu_has_zapped_obsolete_pages(kvm))
  3174				continue;
  3175	
  3176			freed = kvm_shadow_mmu_shrink_scan(kvm, sc->nr_to_scan);
  3177	
  3178			/*
  3179			 * unfair on small ones
  3180			 * per-vm shrinkers cry out
  3181			 * sadness comes quickly
  3182			 */
  3183			list_move_tail(&kvm->vm_list, &vm_list);
  3184			break;
  3185		}
  3186	
  3187		mutex_unlock(&kvm_lock);
  3188		return freed;
  3189	}
  3190
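
Both reports flag the same issue: the patch drops the mmu_shrink_scan()
declaration from shadow_mmu.h but leaves the definition in mmu.c
non-static, so the function has no previous prototype. Since it is now
only referenced through the mmu_shrinker structure in the same file
(alongside the already-static mmu_shrink_count()), a minimal fix — a
sketch of the likely follow-up, not a change included in this series —
is to declare it static, as the clang note suggests:

        --- a/arch/x86/kvm/mmu/mmu.c
        +++ b/arch/x86/kvm/mmu/mmu.c
        -unsigned long mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        +static unsigned long mmu_shrink_scan(struct shrinker *shrink,
        +                                     struct shrink_control *sc)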
Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index cef481a17a519..3ea54b08239aa 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3145,6 +3145,49 @@  static unsigned long mmu_shrink_count(struct shrinker *shrink,
 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
 }
 
+unsigned long mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	struct kvm *kvm;
+	int nr_to_scan = sc->nr_to_scan;
+	unsigned long freed = 0;
+
+	mutex_lock(&kvm_lock);
+
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		/*
+		 * Never scan more than sc->nr_to_scan VM instances.
+		 * Will not hit this condition practically since we do not try
+		 * to shrink more than one VM and it is very unlikely to see
+		 * !n_used_mmu_pages so many times.
+		 */
+		if (!nr_to_scan--)
+			break;
+
+		/*
+		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
+		 * here. We may skip a VM instance errorneosly, but we do not
+		 * want to shrink a VM that only started to populate its MMU
+		 * anyway.
+		 */
+		if (!kvm->arch.n_used_mmu_pages &&
+		    !kvm_shadow_mmu_has_zapped_obsolete_pages(kvm))
+			continue;
+
+		freed = kvm_shadow_mmu_shrink_scan(kvm, sc->nr_to_scan);
+
+		/*
+		 * unfair on small ones
+		 * per-vm shrinkers cry out
+		 * sadness comes quickly
+		 */
+		list_move_tail(&kvm->vm_list, &vm_list);
+		break;
+	}
+
+	mutex_unlock(&kvm_lock);
+	return freed;
+}
+
 static struct shrinker mmu_shrinker = {
 	.count_objects = mmu_shrink_count,
 	.scan_objects = mmu_shrink_scan,
diff --git a/arch/x86/kvm/mmu/shadow_mmu.c b/arch/x86/kvm/mmu/shadow_mmu.c
index 1be680bce15a6..76c50aca3c487 100644
--- a/arch/x86/kvm/mmu/shadow_mmu.c
+++ b/arch/x86/kvm/mmu/shadow_mmu.c
@@ -3160,7 +3160,7 @@  void kvm_zap_obsolete_pages(struct kvm *kvm)
 	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
 }
 
-static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
+bool kvm_shadow_mmu_has_zapped_obsolete_pages(struct kvm *kvm)
 {
 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
 }
@@ -3429,60 +3429,24 @@  void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
 		kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
 }
 
-unsigned long mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+unsigned long kvm_shadow_mmu_shrink_scan(struct kvm *kvm, int pages_to_free)
 {
-	struct kvm *kvm;
-	int nr_to_scan = sc->nr_to_scan;
 	unsigned long freed = 0;
+	int idx;
 
-	mutex_lock(&kvm_lock);
-
-	list_for_each_entry(kvm, &vm_list, vm_list) {
-		int idx;
-		LIST_HEAD(invalid_list);
-
-		/*
-		 * Never scan more than sc->nr_to_scan VM instances.
-		 * Will not hit this condition practically since we do not try
-		 * to shrink more than one VM and it is very unlikely to see
-		 * !n_used_mmu_pages so many times.
-		 */
-		if (!nr_to_scan--)
-			break;
-		/*
-		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
-		 * here. We may skip a VM instance errorneosly, but we do not
-		 * want to shrink a VM that only started to populate its MMU
-		 * anyway.
-		 */
-		if (!kvm->arch.n_used_mmu_pages &&
-		    !kvm_has_zapped_obsolete_pages(kvm))
-			continue;
-
-		idx = srcu_read_lock(&kvm->srcu);
-		write_lock(&kvm->mmu_lock);
-
-		if (kvm_has_zapped_obsolete_pages(kvm)) {
-			kvm_mmu_commit_zap_page(kvm,
-			      &kvm->arch.zapped_obsolete_pages);
-			goto unlock;
-		}
+	idx = srcu_read_lock(&kvm->srcu);
+	write_lock(&kvm->mmu_lock);
 
-		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);
+	if (kvm_shadow_mmu_has_zapped_obsolete_pages(kvm)) {
+		kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
+		goto out;
+	}
 
-unlock:
-		write_unlock(&kvm->mmu_lock);
-		srcu_read_unlock(&kvm->srcu, idx);
+	freed = kvm_mmu_zap_oldest_mmu_pages(kvm, pages_to_free);
 
-		/*
-		 * unfair on small ones
-		 * per-vm shrinkers cry out
-		 * sadness comes quickly
-		 */
-		list_move_tail(&kvm->vm_list, &vm_list);
-		break;
-	}
+out:
+	write_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
 
-	mutex_unlock(&kvm_lock);
 	return freed;
 }
diff --git a/arch/x86/kvm/mmu/shadow_mmu.h b/arch/x86/kvm/mmu/shadow_mmu.h
index 9f16c4782bfbf..9e27d03fbe368 100644
--- a/arch/x86/kvm/mmu/shadow_mmu.h
+++ b/arch/x86/kvm/mmu/shadow_mmu.h
@@ -112,7 +112,8 @@  void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
 void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
 				    const struct kvm_memory_slot *slot);
 
-unsigned long mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc);
+bool kvm_shadow_mmu_has_zapped_obsolete_pages(struct kvm *kvm);
+unsigned long kvm_shadow_mmu_shrink_scan(struct kvm *kvm, int pages_to_free);
 
 /* Exports from paging_tmpl.h */
 gpa_t paging32_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,