Message ID | 20200605213853.14959-20-sean.j.christopherson@intel.com (mailing list archive)
---|---
State | New, archived
Series | KVM: Cleanup and unify kvm_mmu_memory_cache usage
Reviewed-by: Huacai Chen <chenhc@lemote.com>

On Sat, Jun 6, 2020 at 5:44 AM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> Replace the @max param in mmu_topup_memory_cache() and instead use
> ARRAY_SIZE() to terminate the loop to fill the cache.  This removes a
> BUG_ON() and sets the stage for moving MIPS to the common memory cache
> implementation.
>
> No functional change intended.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  arch/mips/kvm/mmu.c | 12 ++++--------
>  1 file changed, 4 insertions(+), 8 deletions(-)
>
> diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
> index 7dad7a293eae..94562c54b930 100644
> --- a/arch/mips/kvm/mmu.c
> +++ b/arch/mips/kvm/mmu.c
> @@ -25,15 +25,13 @@
>  #define KVM_MMU_CACHE_MIN_PAGES 2
>  #endif
>
> -static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
> -                                  int min, int max)
> +static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
>  {
>          void *page;
>
> -        BUG_ON(max > KVM_NR_MEM_OBJS);
>          if (cache->nobjs >= min)
>                  return 0;
> -        while (cache->nobjs < max) {
> +        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
>                  page = (void *)__get_free_page(GFP_KERNEL);
>                  if (!page)
>                          return -ENOMEM;
> @@ -711,8 +709,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
>                  goto out;
>
>          /* We need a minimum of cached pages ready for page table creation */
> -        err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
> -                                     KVM_NR_MEM_OBJS);
> +        err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
>          if (err)
>                  goto out;
>
> @@ -796,8 +793,7 @@ static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
>          int ret;
>
>          /* We need a minimum of cached pages ready for page table creation */
> -        ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
> -                                     KVM_NR_MEM_OBJS);
> +        ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
>          if (ret)
>                  return NULL;
>
> --
> 2.26.0
>
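The equivalence the change relies on is that ARRAY_SIZE(cache->objects) evaluates to the same bound the removed BUG_ON() enforced. A minimal sketch of the cache structure, assuming the MIPS definition sizes its object array with KVM_NR_MEM_OBJS (the layout and the constant's value below are illustrative, not copied from the tree):

#define KVM_NR_MEM_OBJS 40			/* illustrative; the real value comes from the arch header */

struct kvm_mmu_memory_cache {
	int nobjs;				/* number of objects currently cached */
	void *objects[KVM_NR_MEM_OBJS];		/* ARRAY_SIZE(objects) == KVM_NR_MEM_OBJS */
};

/*
 * With this layout, "cache->nobjs < ARRAY_SIZE(cache->objects)" bounds the
 * fill loop at exactly KVM_NR_MEM_OBJS entries, so passing @max explicitly
 * and asserting BUG_ON(max > KVM_NR_MEM_OBJS) adds nothing.
 */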
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 7dad7a293eae..94562c54b930 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -25,15 +25,13 @@
 #define KVM_MMU_CACHE_MIN_PAGES 2
 #endif
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-				  int min, int max)
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
 {
 	void *page;
 
-	BUG_ON(max > KVM_NR_MEM_OBJS);
 	if (cache->nobjs >= min)
 		return 0;
-	while (cache->nobjs < max) {
+	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
 		page = (void *)__get_free_page(GFP_KERNEL);
 		if (!page)
 			return -ENOMEM;
@@ -711,8 +709,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 		goto out;
 
 	/* We need a minimum of cached pages ready for page table creation */
-	err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
-				     KVM_NR_MEM_OBJS);
+	err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
 	if (err)
 		goto out;
 
@@ -796,8 +793,7 @@ static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
 	int ret;
 
 	/* We need a minimum of cached pages ready for page table creation */
-	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
-				     KVM_NR_MEM_OBJS);
+	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
 	if (ret)
 		return NULL;
Replace the @max param in mmu_topup_memory_cache() and instead use
ARRAY_SIZE() to terminate the loop to fill the cache.  This removes a
BUG_ON() and sets the stage for moving MIPS to the common memory cache
implementation.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/mips/kvm/mmu.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)
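Piecing the hunks together, the helper after this patch has roughly the following shape; the tail of the fill loop sits outside the diff context, so it is filled in here as the usual top-up pattern rather than quoted verbatim from the file:

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
{
	void *page;

	/* Already holding at least @min objects, nothing to do. */
	if (cache->nobjs >= min)
		return 0;

	/* Top up to the capacity of the fixed objects[] array. */
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = (void *)__get_free_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		/* Not shown in the hunk: stash the page in the cache. */
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

Callers, as in the two hunks above, now pass only the minimum they need (KVM_MMU_CACHE_MIN_PAGES); the upper bound is implied by the array size instead of an explicit @max argument.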