@@ -3138,37 +3138,51 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
struct kvm *kvm;
struct kvm *kvm_freed = NULL;
+ struct kvm *kvm_last;
int cache_count = 0;
spin_lock(&kvm_lock);
- list_for_each_entry(kvm, &vm_list, vm_list) {
+ if (list_empty(&vm_list))
+ goto out;
+
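+ /* remember the current tail so we know when every VM has been scanned once */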
+ kvm_last = list_entry(vm_list.prev, struct kvm, vm_list);
+
+ for (;;) {
int npages, idx, freed_pages;
LIST_HEAD(invalid_list);
+ kvm = list_first_entry(&vm_list, struct kvm, vm_list);
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
npages = kvm->arch.n_alloc_mmu_pages -
kvm->arch.n_free_mmu_pages;
- cache_count += npages;
- if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
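+ /* count each VM's pages only once, during the first pass over the list */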
+ if (kvm_last)
+ cache_count += npages;
+ if (nr_to_scan > 0 && npages > 0) {
freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
&invalid_list);
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
cache_count -= freed_pages;
kvm_freed = kvm;
- }
- nr_to_scan--;
+ nr_to_scan -= freed_pages;
+ } else if (kvm == kvm_freed)
+ nr_to_scan = 0; /* no more pages to be freed, break */
- kvm_mmu_commit_zap_page(kvm, &invalid_list);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
- }
- if (kvm_freed)
- list_move_tail(&kvm_freed->vm_list, &vm_list);
+ /* rotate the scanned VM to the tail so the next pass picks the next VM */
+ list_move_tail(&kvm->vm_list, &vm_list);
+ if (kvm == kvm_last) /* just scanned all vms */
+ kvm_last = NULL;
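+ /* after a full pass, stop once the scan target is met or nothing could be freed */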
+ if (!kvm_last && (nr_to_scan <= 0 || !kvm_freed))
+ break;
+ }
+
+out:
spin_unlock(&kvm_lock);
- return cache_count;
+ return cache_count < 0 ? 0 : cache_count;
}
static struct shrinker mmu_shrinker = {