diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
@@ -6693,16 +6693,57 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 	}
 }
 
-static unsigned long
-mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
-{
-	return SHRINK_STOP;
+static unsigned long mmu_shrink_scan(struct shrinker *shrink,
+				     struct shrink_control *sc)
+{
+	struct kvm *kvm, *next_kvm, *first_kvm = NULL;
+	struct kvm_mmu_memory_cache *cache;
+	unsigned long i, freed = 0;
+	struct mutex *cache_lock;
+	struct kvm_vcpu *vcpu;
+
+	mutex_lock(&kvm_lock);
+	list_for_each_entry_safe(kvm, next_kvm, &vm_list, vm_list) {
+		if (first_kvm == kvm)
+			break;
+
+		if (!first_kvm)
+			first_kvm = kvm;
+
+		list_move_tail(&kvm->vm_list, &vm_list);
+
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			cache = &vcpu->arch.mmu_shadow_page_cache;
+			cache_lock = &vcpu->arch.mmu_shadow_page_cache_lock;
+			if (mutex_trylock(cache_lock)) {
+				if (cache->nobjs) {
+					freed += cache->nobjs;
+					kvm_mmu_empty_memory_cache(cache);
+				}
+				mutex_unlock(cache_lock);
+				if (freed >= sc->nr_to_scan)
+					goto out;
+			}
+		}
+	}
+out:
+	mutex_unlock(&kvm_lock);
+	if (freed) {
+		percpu_counter_sub(&kvm_total_unused_cached_pages, freed);
+		return freed;
+	} else {
+		return SHRINK_STOP;
+	}
 }
 
-static unsigned long
-mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long mmu_shrink_count(struct shrinker *shrink,
+				      struct shrink_control *sc)
 {
-	return SHRINK_EMPTY;
+	s64 count = percpu_counter_sum(&kvm_total_unused_cached_pages);
+
+	WARN_ON(count < 0);
+	return count <= 0 ? SHRINK_EMPTY : count;
+
 }
 
 static struct shrinker mmu_shrinker = {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
@@ -1361,6 +1361,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm);
 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
 int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
 int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
+void kvm_mmu_empty_memory_cache(struct kvm_mmu_memory_cache *mc);
 void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
 void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
@@ -430,7 +430,7 @@ int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
 	return mc->nobjs;
 }
 
-void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+void kvm_mmu_empty_memory_cache(struct kvm_mmu_memory_cache *mc)
 {
 	while (mc->nobjs) {
 		if (mc->kmem_cache)
@@ -438,7 +438,11 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 		else
 			free_page((unsigned long)mc->objects[--mc->nobjs]);
 	}
+}
+void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+	kvm_mmu_empty_memory_cache(mc);
 	kvfree(mc->objects);
 	mc->objects = NULL;
 }
 
Shrink shadow page caches via MMU shrinker based on
kvm_total_unused_cached_pages. Traverse each vCPU of all of the VMs, empty
the caches, and exit the shrinker once a sufficient number of pages has
been freed. Also, move processed VMs to the end of vm_list so that next
time other VMs are tortured first.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/x86/kvm/mmu/mmu.c   | 55 +++++++++++++++++++++++++++++++++++-----
 include/linux/kvm_host.h |  1 +
 virt/kvm/kvm_main.c      |  6 ++++-
 3 files changed, 54 insertions(+), 8 deletions(-)
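
For context, the two callbacks above hook into the existing mmu_shrinker
definition that the first hunk ends on. Below is a minimal sketch of that
wiring; it is illustrative only: mmu_shrinker_setup() is a made-up helper
name, and the exact register_shrinker() signature varies across kernel
versions (newer kernels also take a shrinker name string).

/*
 * Illustrative only: how the count/scan callbacks hang off the MMU
 * shrinker. Field names match struct shrinker in <linux/shrinker.h>;
 * mmu_shrinker_setup() is a hypothetical helper for this sketch.
 */
static struct shrinker mmu_shrinker = {
	.count_objects = mmu_shrink_count,	/* sums kvm_total_unused_cached_pages */
	.scan_objects = mmu_shrink_scan,	/* empties per-vCPU shadow page caches */
	.seeks = DEFAULT_SEEKS,
};

static int mmu_shrinker_setup(void)
{
	/* Newer kernels: register_shrinker(&mmu_shrinker, "x86-mmu"); */
	return register_shrinker(&mmu_shrinker);
}

The scan walks vm_list under kvm_lock and uses mutex_trylock() on each
per-vCPU cache lock, so a vCPU that is busy topping up or consuming its
cache is skipped rather than stalled; rotating every visited VM to the
tail of vm_list spreads reclaim pressure across VMs on successive scans.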