Message ID | 20200605213853.14959-2-sean.j.christopherson@intel.com (mailing list archive) |
---|---|
State | New, archived |
Series | KVM: Cleanup and unify kvm_mmu_memory_cache usage |
On Fri, Jun 5, 2020 at 2:39 PM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> Track the kmem_cache used for non-page KVM MMU memory caches instead of
> passing in the associated kmem_cache when filling the cache. This will
> allow consolidating code and other cleanups.
>
> No functional change intended.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>

Reviewed-by: Ben Gardon <bgardon@google.com>

> ---
>  arch/x86/include/asm/kvm_host.h |  1 +
>  arch/x86/kvm/mmu/mmu.c          | 24 +++++++++++-------------
>  2 files changed, 12 insertions(+), 13 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 1da5858501ca..16347b050754 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -251,6 +251,7 @@ struct kvm_kernel_irq_routing_entry;
>   */
>  struct kvm_mmu_memory_cache {
>          int nobjs;
> +        struct kmem_cache *kmem_cache;
>          void *objects[KVM_NR_MEM_OBJS];
>  };
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index fdd05c233308..0830c195c9ed 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1060,15 +1060,14 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
>          local_irq_enable();
>  }
>
> -static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
> -                                  struct kmem_cache *base_cache, int min)
> +static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
>  {
>          void *obj;
>
>          if (cache->nobjs >= min)
>                  return 0;
>          while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
> -                obj = kmem_cache_zalloc(base_cache, GFP_KERNEL_ACCOUNT);
> +                obj = kmem_cache_zalloc(cache->kmem_cache, GFP_KERNEL_ACCOUNT);
>                  if (!obj)
>                          return cache->nobjs >= min ? 0 : -ENOMEM;
>                  cache->objects[cache->nobjs++] = obj;
> @@ -1081,11 +1080,10 @@ static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
>          return cache->nobjs;
>  }
>
> -static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
> -                                  struct kmem_cache *cache)
> +static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
>  {
>          while (mc->nobjs)
> -                kmem_cache_free(cache, mc->objects[--mc->nobjs]);
> +                kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
>  }
>
>  static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
> @@ -1115,25 +1113,22 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
>          int r;
>
>          r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
> -                                   pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
> +                                   8 + PTE_PREFETCH_NUM);
>          if (r)
>                  goto out;
>          r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
>          if (r)
>                  goto out;
> -        r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
> -                                   mmu_page_header_cache, 4);
> +        r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, 4);
>  out:
>          return r;
>  }
>
>  static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
>  {
> -        mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
> -                              pte_list_desc_cache);
> +        mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
>          mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
> -        mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
> -                              mmu_page_header_cache);
> +        mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
>  }
>
>  static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
> @@ -5684,6 +5679,9 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
>          uint i;
>          int ret;
>
> +        vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
> +        vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
> +
>          vcpu->arch.mmu = &vcpu->arch.root_mmu;
>          vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
>
> --
> 2.26.0
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1da5858501ca..16347b050754 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -251,6 +251,7 @@ struct kvm_kernel_irq_routing_entry;
  */
 struct kvm_mmu_memory_cache {
         int nobjs;
+        struct kmem_cache *kmem_cache;
         void *objects[KVM_NR_MEM_OBJS];
 };

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index fdd05c233308..0830c195c9ed 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1060,15 +1060,14 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
         local_irq_enable();
 }

-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-                                  struct kmem_cache *base_cache, int min)
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
 {
         void *obj;

         if (cache->nobjs >= min)
                 return 0;
         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-                obj = kmem_cache_zalloc(base_cache, GFP_KERNEL_ACCOUNT);
+                obj = kmem_cache_zalloc(cache->kmem_cache, GFP_KERNEL_ACCOUNT);
                 if (!obj)
                         return cache->nobjs >= min ? 0 : -ENOMEM;
                 cache->objects[cache->nobjs++] = obj;
@@ -1081,11 +1080,10 @@ static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
         return cache->nobjs;
 }

-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
-                                  struct kmem_cache *cache)
+static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 {
         while (mc->nobjs)
-                kmem_cache_free(cache, mc->objects[--mc->nobjs]);
+                kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
 }

 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
@@ -1115,25 +1113,22 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
         int r;

         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
-                                   pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
+                                   8 + PTE_PREFETCH_NUM);
         if (r)
                 goto out;
         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
         if (r)
                 goto out;
-        r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
-                                   mmu_page_header_cache, 4);
+        r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, 4);
 out:
         return r;
 }

 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-        mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
-                              pte_list_desc_cache);
+        mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
-        mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
-                              mmu_page_header_cache);
+        mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }

 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
@@ -5684,6 +5679,9 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
         uint i;
         int ret;

+        vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
+        vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
+
         vcpu->arch.mmu = &vcpu->arch.root_mmu;
         vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
Track the kmem_cache used for non-page KVM MMU memory caches instead of
passing in the associated kmem_cache when filling the cache. This will
allow consolidating code and other cleanups.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/mmu/mmu.c          | 24 +++++++++++-------------
 2 files changed, 12 insertions(+), 13 deletions(-)
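[Editor's illustration] The pattern the changelog describes, storing the backing kmem_cache in the cache structure once instead of threading it through every topup and free call, can be sketched outside the kernel. The following is a minimal, self-contained userspace C analogue, not KVM code: the names obj_cache, cache_topup and cache_free_all are illustrative stand-ins, and calloc/free stand in for kmem_cache_zalloc/kmem_cache_free.

#include <stdio.h>
#include <stdlib.h>

#define NR_CACHE_OBJS 40

/*
 * Analogue of struct kvm_mmu_memory_cache after this patch: the cache
 * remembers what backs it (here just an object size), so callers no
 * longer pass that information on every fill or free.
 */
struct obj_cache {
        int nobjs;
        size_t obj_size;              /* stands in for the kmem_cache pointer */
        void *objects[NR_CACHE_OBJS];
};

/* Fill the cache up to at least @min objects, mirroring mmu_topup_memory_cache(). */
static int cache_topup(struct obj_cache *cache, int min)
{
        void *obj;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < NR_CACHE_OBJS) {
                obj = calloc(1, cache->obj_size);  /* zeroed, like kmem_cache_zalloc() */
                if (!obj)
                        return cache->nobjs >= min ? 0 : -1;
                cache->objects[cache->nobjs++] = obj;
        }
        return 0;
}

/* Drain the cache, mirroring mmu_free_memory_cache(): no allocator argument needed. */
static void cache_free_all(struct obj_cache *cache)
{
        while (cache->nobjs)
                free(cache->objects[--cache->nobjs]);
}

int main(void)
{
        /* One-time association of cache and backing allocator, as in kvm_mmu_create(). */
        struct obj_cache cache = { .obj_size = 64 };

        if (cache_topup(&cache, 8))
                return 1;
        printf("cache filled with %d objects\n", cache.nobjs);
        cache_free_all(&cache);
        return 0;
}

Binding the allocator to the cache once, at creation time, leaves the topup and free paths with only a cache pointer and a count, which is the property the changelog points to when it says the change "will allow consolidating code and other cleanups" across the rest of the series.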