@@ -2923,6 +2923,26 @@ static int kvm_mmu_remove_some_alloc_mmu
return kvm_mmu_zap_page(kvm, page) + 1;
}
+static int shrink_kvm_mmu(struct kvm *kvm, int nr_to_scan)
+{
+ int idx, freed_pages = 0;
+
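+ /*
+ * Hold the per-VM SRCU read lock and mmu_lock while zapping: SRCU
+ * keeps the memslots stable and mmu_lock protects the shadow page
+ * lists.
+ */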
+ idx = srcu_read_lock(&kvm->srcu);
+ spin_lock(&kvm->mmu_lock);
+ if (kvm->arch.n_used_mmu_pages > 0)
+ freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm);
+
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ /*
+ * Ideally this would return the number of objects (mmu pages)
+ * that we scanned; for now, just return the number we were
+ * able to free.
+ */
+ return freed_pages;
+}
+
static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
{
struct kvm *kvm;
@@ -2934,20 +2954,15 @@ static int mmu_shrink(int nr_to_scan, gf
spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
- int idx, freed_pages;
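+ /* Each VM visited counts as one scan unit; stop when the budget runs out. */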
+ if (nr_to_scan <= 0)
+ break;
- idx = srcu_read_lock(&kvm->srcu);
- spin_lock(&kvm->mmu_lock);
- if (!kvm_freed && nr_to_scan > 0 &&
- kvm->arch.n_used_mmu_pages > 0) {
- freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm);
+ shrink_kvm_mmu(kvm, nr_to_scan);
+ if (!kvm_freed)
kvm_freed = kvm;
- }
nr_to_scan--;
-
- spin_unlock(&kvm->mmu_lock);
- srcu_read_unlock(&kvm->srcu, idx);
}
+
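+ /*
+ * Rotate the first VM we visited to the tail of vm_list so that
+ * repeated shrinker calls spread the work across VMs.
+ */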
if (kvm_freed)
list_move_tail(&kvm_freed->vm_list, &vm_list);