@@ -382,7 +382,7 @@ struct kvm_arch {
unsigned int n_free_mmu_pages;
unsigned int n_requested_mmu_pages;
- unsigned int n_alloc_mmu_pages;
+ unsigned int n_max_mmu_pages;
atomic_t invlpg_counter;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
@@ -1522,7 +1522,7 @@ void kvm_mmu_change_mmu_pages(struct kvm
{
int used_pages;

- used_pages = kvm->arch.n_alloc_mmu_pages - kvm_mmu_available_pages(kvm);
+ used_pages = kvm->arch.n_max_mmu_pages - kvm_mmu_available_pages(kvm);
used_pages = max(0, used_pages);

/*
@@ -1546,9 +1546,9 @@ void kvm_mmu_change_mmu_pages(struct kvm
}
else
kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
- - kvm->arch.n_alloc_mmu_pages;
+ - kvm->arch.n_max_mmu_pages;

- kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
+ kvm->arch.n_max_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
@@ -2932,7 +2932,7 @@ static int mmu_shrink(int nr_to_scan, gf

idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
- npages = kvm->arch.n_alloc_mmu_pages -
+ npages = kvm->arch.n_max_mmu_pages -
kvm_mmu_available_pages(kvm);
cache_count += npages;
if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
@@ -2557,7 +2557,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
- return kvm->arch.n_alloc_mmu_pages;
+ return kvm->arch.n_max_mmu_pages;
}

gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
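
As a side note for review, the arithmetic the rename leaves untouched can be modelled outside the kernel. The sketch below is a minimal stand-alone C model of the accounting only, not the kernel implementation: n_max_mmu_pages is the configured ceiling, n_free_mmu_pages is the remaining headroom, and the number of shadow pages in use is their difference, which is exactly what kvm_mmu_change_mmu_pages() and mmu_shrink() compute above. struct mmu_model, model_available_pages() and model_change_mmu_pages() are hypothetical names chosen for this illustration, and the zap-on-shrink path is deliberately omitted.

/*
 * Stand-alone model of the n_max/n_free accounting above; illustration
 * only, not kernel code.
 */
#include <stdio.h>

struct mmu_model {
	unsigned int n_free_mmu_pages;	/* headroom still available */
	unsigned int n_max_mmu_pages;	/* configured ceiling (the renamed field) */
};

/* Mirrors what kvm_mmu_available_pages() reports: the unused headroom. */
static int model_available_pages(const struct mmu_model *m)
{
	return (int)m->n_free_mmu_pages;
}

/*
 * Mirrors the shape of kvm_mmu_change_mmu_pages(): pages in use are the
 * ceiling minus the headroom; raising the ceiling only grows the headroom.
 * The real code zaps shadow pages when shrinking below the in-use count;
 * this model just clamps the ceiling instead.
 */
static void model_change_mmu_pages(struct mmu_model *m, unsigned int new_max)
{
	int used = (int)m->n_max_mmu_pages - model_available_pages(m);

	if (used < 0)
		used = 0;		/* same clamp as max(0, used_pages) */

	if ((unsigned int)used > new_max) {
		new_max = (unsigned int)used;
		m->n_free_mmu_pages = 0;
	} else {
		m->n_free_mmu_pages += new_max - m->n_max_mmu_pages;
	}
	m->n_max_mmu_pages = new_max;
}

int main(void)
{
	struct mmu_model m = { .n_free_mmu_pages = 40, .n_max_mmu_pages = 100 };

	model_change_mmu_pages(&m, 160);
	printf("max=%u free=%u used=%d\n", m.n_max_mmu_pages, m.n_free_mmu_pages,
	       (int)m.n_max_mmu_pages - model_available_pages(&m));
	return 0;
}

Raising the ceiling from 100 to 160 prints max=160 free=100 used=60: the in-use count is unaffected and only the headroom grows, which is why "max" describes the field better than "alloc".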