
[v3,6/9] KVM: Provide NUMA node support to kvm_mmu_memory_cache{}

Message ID: 20221222023457.1764-7-vipinsh@google.com
State: New, archived
Series: NUMA aware page table's pages allocation

Commit Message

Vipin Sharma Dec. 22, 2022, 2:34 a.m. UTC
Add 'node' variable in kvm_mmu_memory_cache{} to denote which NUMA node
this cache should allocate memory from. Default initialize to
NUMA_NO_NODE in all architectures.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/arm64/kvm/arm.c      |  2 +-
 arch/arm64/kvm/mmu.c      |  4 +++-
 arch/mips/kvm/mips.c      |  2 ++
 arch/riscv/kvm/mmu.c      |  2 +-
 arch/riscv/kvm/vcpu.c     |  2 +-
 arch/x86/kvm/mmu/mmu.c    | 22 ++++++++++++----------
 include/linux/kvm_host.h  |  6 ++++++
 include/linux/kvm_types.h |  2 ++
 8 files changed, 28 insertions(+), 14 deletions(-)
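
(This patch only adds the field and default-initializes it; later
patches in the series are expected to consume it. As a rough sketch,
not a hunk from this patch, an allocation path could honor 'node' as
below; kmem_cache_alloc_node() and alloc_pages_node() both accept
NUMA_NO_NODE, so a default-initialized cache keeps today's behavior.)

static void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					gfp_t gfp_flags)
{
	struct page *page;

	gfp_flags |= mc->gfp_zero;

	/* Slab-backed caches: allocate the object on the requested node. */
	if (mc->kmem_cache)
		return kmem_cache_alloc_node(mc->kmem_cache, gfp_flags,
					     mc->node);

	/* Page-backed caches: NUMA_NO_NODE means "no preference". */
	page = alloc_pages_node(mc->node, gfp_flags, 0);
	return page ? page_address(page) : NULL;
}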

Comments

Ben Gardon Dec. 27, 2022, 7:09 p.m. UTC | #1
On Wed, Dec 21, 2022 at 6:35 PM Vipin Sharma <vipinsh@google.com> wrote:
>
> Add 'node' variable in kvm_mmu_memory_cache{} to denote which NUMA node
> this cache should allocate memory from. Default initialize to
> NUMA_NO_NODE in all architectures.
>
> Signed-off-by: Vipin Sharma <vipinsh@google.com>
> ---
>  arch/arm64/kvm/arm.c      |  2 +-
>  arch/arm64/kvm/mmu.c      |  4 +++-
>  arch/mips/kvm/mips.c      |  2 ++
>  arch/riscv/kvm/mmu.c      |  2 +-
>  arch/riscv/kvm/vcpu.c     |  2 +-
>  arch/x86/kvm/mmu/mmu.c    | 22 ++++++++++++----------
>  include/linux/kvm_host.h  |  6 ++++++
>  include/linux/kvm_types.h |  2 ++
>  8 files changed, 28 insertions(+), 14 deletions(-)
>
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 9c5573bc4614..52a41f4532e2 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -340,7 +340,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
>         vcpu->arch.target = -1;
>         bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
>
> -       vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
> +       INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache, NULL, NUMA_NO_NODE);
>
>         /*
>          * Default value for the FP state, will be overloaded at load
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 31d7fa4c7c14..bd07155e17fa 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -894,12 +894,14 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
>  {
>         phys_addr_t addr;
>         int ret = 0;
> -       struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
> +       struct kvm_mmu_memory_cache cache;
>         struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
>         enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
>                                      KVM_PGTABLE_PROT_R |
>                                      (writable ? KVM_PGTABLE_PROT_W : 0);
>
> +       INIT_KVM_MMU_MEMORY_CACHE(&cache, NULL, NUMA_NO_NODE);
> +
>         if (is_protected_kvm_enabled())
>                 return -EPERM;
>
> diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
> index a25e0b73ee70..b017c29a9340 100644
> --- a/arch/mips/kvm/mips.c
> +++ b/arch/mips/kvm/mips.c
> @@ -304,6 +304,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
>                      HRTIMER_MODE_REL);
>         vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
>
> +       vcpu->arch.mmu_page_cache.node = NUMA_NO_NODE;
> +

It looks weird to have MIPS not using the initialization macro. Should
the macro just take a GFP_ZERO parameter?
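
E.g., a variant that takes the gfp flags as well (hypothetical, just to
illustrate the idea):

#define INIT_KVM_MMU_MEMORY_CACHE(_cache, _kmem_cache, _gfp_zero, _node) ({ \
	(_cache)->kmem_cache = _kmem_cache;				    \
	(_cache)->gfp_zero = _gfp_zero;					    \
	(_cache)->node = _node;						    \
})

so MIPS could pass 0 for _gfp_zero and keep its current behavior:

	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache, NULL, 0,
				  NUMA_NO_NODE);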

>         /*
>          * Allocate space for host mode exception handlers that handle
>          * guest mode exits
> diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
> index 34b57e0be2ef..119de4520cc6 100644
> --- a/arch/riscv/kvm/mmu.c
> +++ b/arch/riscv/kvm/mmu.c
> @@ -353,9 +353,9 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
>         phys_addr_t addr, end;
>         struct kvm_mmu_memory_cache pcache = {
>                 .gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
> -               .gfp_zero = __GFP_ZERO,
>         };
>
> +       INIT_KVM_MMU_MEMORY_CACHE(&pcache, NULL, NUMA_NO_NODE);
>         end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
>         pfn = __phys_to_pfn(hpa);
>
> diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
> index 7c08567097f0..189b14feb365 100644
> --- a/arch/riscv/kvm/vcpu.c
> +++ b/arch/riscv/kvm/vcpu.c
> @@ -161,7 +161,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
>
>         /* Mark this VCPU never ran */
>         vcpu->arch.ran_atleast_once = false;
> -       vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
> +       INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache, NULL, NUMA_NO_NODE);
>         bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
>
>         /* Setup ISA features available to VCPU */
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 6f6a10d7a871..23a3b82b2384 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -5954,13 +5954,14 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
>  {
>         int ret;
>
> -       vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
> -       vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
> +       INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_pte_list_desc_cache,
> +                                 pte_list_desc_cache, NUMA_NO_NODE);
>
> -       vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
> -       vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
> +       INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_header_cache,
> +                                 mmu_page_header_cache, NUMA_NO_NODE);
>
> -       vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
> +       INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_shadow_page_cache,
> +                                 NULL, NUMA_NO_NODE);
>         spin_lock_init(&vcpu->arch.mmu_shadow_page_cache_lock);
>
>         vcpu->arch.mmu = &vcpu->arch.root_mmu;
> @@ -6124,14 +6125,15 @@ int kvm_mmu_init_vm(struct kvm *kvm)
>         node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
>         kvm_page_track_register_notifier(kvm, node);
>
> -       kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
> -       kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
> +       INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_page_header_cache,
> +                                 mmu_page_header_cache, NUMA_NO_NODE);
>
> -       kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
> +       INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_shadow_page_cache,
> +                                 NULL, NUMA_NO_NODE);
>         spin_lock_init(&kvm->arch.split_shadow_page_cache_lock);
>
> -       kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
> -       kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
> +       INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_desc_cache,
> +                                 pte_list_desc_cache, NUMA_NO_NODE);
>
>         return 0;
>  }
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index a262e15ebd19..719687a37ef7 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -2302,4 +2302,10 @@ static inline void kvm_account_pgtable_pages(void *virt, int nr)
>  /* Max number of entries allowed for each kvm dirty ring */
>  #define  KVM_DIRTY_RING_MAX_ENTRIES  65536
>
> +#define INIT_KVM_MMU_MEMORY_CACHE(_cache, _kmem_cache, _node) ({       \
> +       (_cache)->kmem_cache = _kmem_cache;                             \
> +       (_cache)->gfp_zero = __GFP_ZERO;                                \
> +       (_cache)->node = _node;                                         \
> +})
> +

Given that this initialization is probably not happening in a super
hot path, is there any downside to just using a function for the
initialization?
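
E.g., something like this sketch (name and exact signature
hypothetical):

static inline void kvm_mmu_memory_cache_init(struct kvm_mmu_memory_cache *cache,
					     struct kmem_cache *kmem_cache,
					     int node)
{
	cache->kmem_cache = kmem_cache;
	cache->gfp_zero = __GFP_ZERO;
	cache->node = node;
}

A function also gives type checking on the arguments for free.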

>  #endif
> diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
> index 76de36e56cdf..9c70ce95e51f 100644
> --- a/include/linux/kvm_types.h
> +++ b/include/linux/kvm_types.h
> @@ -97,6 +97,8 @@ struct kvm_mmu_memory_cache {
>         struct kmem_cache *kmem_cache;
>         int capacity;
>         void **objects;
> +       /* Node on which memory should be allocated by default */
> +       int node;
>  };
>  #endif
>
> --
> 2.39.0.314.g84b9a713c41-goog
>
Vipin Sharma Dec. 28, 2022, 10:07 p.m. UTC | #2
On Tue, Dec 27, 2022 at 11:10 AM Ben Gardon <bgardon@google.com> wrote:
>
> On Wed, Dec 21, 2022 at 6:35 PM Vipin Sharma <vipinsh@google.com> wrote:
> >
> > Add 'node' variable in kvm_mmu_memory_cache{} to denote which NUMA node
> > this cache should allocate memory from. Default initialize to
> > NUMA_NO_NODE in all architectures.
> >
> > Signed-off-by: Vipin Sharma <vipinsh@google.com>
> > ---
> > [...]
> >
> > diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
> > index a25e0b73ee70..b017c29a9340 100644
> > --- a/arch/mips/kvm/mips.c
> > +++ b/arch/mips/kvm/mips.c
> > @@ -304,6 +304,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
> >                      HRTIMER_MODE_REL);
> >         vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
> >
> > +       vcpu->arch.mmu_page_cache.node = NUMA_NO_NODE;
> > +
>
> It looks weird to have MIPS not using the initialization MACRO. Should
> it just have a GFP_ZERO parameter?

MIPS was not setting GFP_ZERO explicitly before my series, so I didn't
make it GFP_ZERO. I am not sure if MIPS needs it or not; I tried to
keep the same functionality in my patch.

Maybe someone from MIPS can tell us more about it.

>
> > [...]
> >
> > +#define INIT_KVM_MMU_MEMORY_CACHE(_cache, _kmem_cache, _node) ({       \
> > +       (_cache)->kmem_cache = _kmem_cache;                             \
> > +       (_cache)->gfp_zero = __GFP_ZERO;                                \
> > +       (_cache)->node = _node;                                         \
> > +})
> > +
>
> Given that this initialization is probably not happening in a super
> hot path, is there any downside to just using a function for the
> initialization?
>

It can totally be a function as well. I will make it a function in the
next version.


Ben Gardon Dec. 29, 2022, 6:22 p.m. UTC | #3
On Wed, Dec 28, 2022 at 2:08 PM Vipin Sharma <vipinsh@google.com> wrote:
>
> On Tue, Dec 27, 2022 at 11:10 AM Ben Gardon <bgardon@google.com> wrote:
> >
> > On Wed, Dec 21, 2022 at 6:35 PM Vipin Sharma <vipinsh@google.com> wrote:
> > >
> > > Add 'node' variable in kvm_mmu_memory_cache{} to denote which NUMA node
> > > this cache should allocate memory from. Default initialize to
> > > NUMA_NO_NODE in all architectures.
> > >
> > > Signed-off-by: Vipin Sharma <vipinsh@google.com>
> > > ---
> > > [...]
> > >
> > > diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
> > > index a25e0b73ee70..b017c29a9340 100644
> > > --- a/arch/mips/kvm/mips.c
> > > +++ b/arch/mips/kvm/mips.c
> > > @@ -304,6 +304,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
> > >                      HRTIMER_MODE_REL);
> > >         vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
> > >
> > > +       vcpu->arch.mmu_page_cache.node = NUMA_NO_NODE;
> > > +
> >
> > It looks weird to have MIPS not using the initialization MACRO. Should
> > it just have a GFP_ZERO parameter?
>
> MIPS is not setting GFP_ZERO explicitly before my series, so, I didn't
> make it GFP_ZERO. I am not sure if MIPS needs it or not, I tried to
> keep the same functionality in my patch.
>
> May be someone from MIPS can tell more about it.

That makes sense; I just don't want to see MIPS get left behind
because we move the cache init logic to a macro or function. Folks
might update the init function but forget to update MIPS too.

>
> >
> > > [...]
> > >
> > > +#define INIT_KVM_MMU_MEMORY_CACHE(_cache, _kmem_cache, _node) ({       \
> > > +       (_cache)->kmem_cache = _kmem_cache;                             \
> > > +       (_cache)->gfp_zero = __GFP_ZERO;                                \
> > > +       (_cache)->node = _node;                                         \
> > > +})
> > > +
> >
> > Given that this initialization is probably not happening in a super
> > hot path, is there any downside to just using a function for the
> > initialization?
> >
>
> It can totally be a function as well. I will make it function in the
> next version.

Awesome, thanks.

David Matlack Dec. 29, 2022, 11:08 p.m. UTC | #4
On Wed, Dec 21, 2022 at 06:34:54PM -0800, Vipin Sharma wrote:
> Add 'node' variable in kvm_mmu_memory_cache{} to denote which NUMA node
> this cache should allocate memory from. Default initialize to
> NUMA_NO_NODE in all architectures.
> 
> Signed-off-by: Vipin Sharma <vipinsh@google.com>
> ---
> [...]
>
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 31d7fa4c7c14..bd07155e17fa 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -894,12 +894,14 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
>  {
>  	phys_addr_t addr;
>  	int ret = 0;
> -	struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
> +	struct kvm_mmu_memory_cache cache;
>  	struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
>  	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
>  				     KVM_PGTABLE_PROT_R |
>  				     (writable ? KVM_PGTABLE_PROT_W : 0);
>  
> +	INIT_KVM_MMU_MEMORY_CACHE(&cache, NULL, NUMA_NO_NODE);

This is not any better than setting cache.node = NUMA_NO_NODE directly.
Yes, it's fewer lines of code, but it's harder to read (what does NULL
mean here?), and every user of kvm_mmu_memory_cache still has to know to
pass NUMA_NO_NODE.

When I originally gave this suggestion, I intended to suggest that
INIT_KVM_MMU_MEMORY_CACHE() provide just default initialization.
Non-default initialization for gfp_zero, gfp_custom, kmem_cache, and
node would remain as it is.

Yes, this adds some more lines, but it keeps things readable, and it
doesn't require every initialization site of kvm_mmu_memory_cache to
know what to pass for gfp_zero, node, and kmem_cache. Each site only
needs to set the fields *it* cares about.

Here's what I mean specifically, based on INIT_LIST_HEAD. I don't think
I got all the kvm_mmu_memory_cache users, but you get the point.


diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 9c5573bc4614..0e138dcaf4d4 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -340,6 +340,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.target = -1;
 	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
+	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache);
 	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
 
 	/*
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 31d7fa4c7c14..f5fd78a4f084 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -894,12 +894,14 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 {
 	phys_addr_t addr;
 	int ret = 0;
-	struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
+	KVM_MMU_MEMORY_CACHE(cache);
 	struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
 				     KVM_PGTABLE_PROT_R |
 				     (writable ? KVM_PGTABLE_PROT_W : 0);
 
+	cache.gfp_zero = __GFP_ZERO;
+
 	if (is_protected_kvm_enabled())
 		return -EPERM;
 
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 34b57e0be2ef..7915a5a2d104 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -351,10 +351,11 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
 	int ret = 0;
 	unsigned long pfn;
 	phys_addr_t addr, end;
-	struct kvm_mmu_memory_cache pcache = {
-		.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
-		.gfp_zero = __GFP_ZERO,
-	};
+	KVM_MMU_MEMORY_CACHE(pcache);
+
+	pcache.gfp_zero = __GFP_ZERO;
+	if (in_atomic)
+		pcache.gfp_custom = GFP_ATOMIC | __GFP_ACCOUNT;
 
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(hpa);
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 7c08567097f0..3d73ab3ec9a4 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -161,6 +161,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
 	/* Mark this VCPU never ran */
 	vcpu->arch.ran_atleast_once = false;
+	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache);
 	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
 	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
 
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 254bc46234e0..d4cd8e64cc03 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5909,14 +5909,19 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
 	int ret;
 
+	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_pte_list_desc_cache);
 	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
 	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
 
+	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_header_cache);
 	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
 	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
 
+	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_shadow_page_cache);
 	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
 
+	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_shadowed_info_cache);
+
 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 
@@ -6083,11 +6088,14 @@ int kvm_mmu_init_vm(struct kvm *kvm)
 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
 	kvm_page_track_register_notifier(kvm, node);
 
+	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_page_header_cache);
 	kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
 	kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
 
+	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_shadow_page_cache);
 	kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
 
+	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_desc_cache);
 	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
 	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
 
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 76de36e56cdf..eb7ff9afa5c7 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -98,6 +98,17 @@ struct kvm_mmu_memory_cache {
 	int capacity;
 	void **objects;
 };
+
+#define KVM_MMU_MEMORY_CACHE_INIT() (struct kvm_mmu_memory_cache) { \
+	.node = NUMA_NO_NODE,					     \
+}
+
+#define KVM_MMU_MEMORY_CACHE(_name) \
+	struct kvm_mmu_memory_cache _name = KVM_MMU_MEMORY_CACHE_INIT()
+
+static inline void INIT_KVM_MMU_MEMORY_CACHE(struct kvm_mmu_memory_cache *cache)
+{
+	*cache = KVM_MMU_MEMORY_CACHE_INIT();
+}
 #endif
 
 #define HALT_POLL_HIST_COUNT			32

> [...]
David Matlack Dec. 29, 2022, 11:11 p.m. UTC | #5
On Thu, Dec 29, 2022 at 3:08 PM David Matlack <dmatlack@google.com> wrote:
>
> On Wed, Dec 21, 2022 at 06:34:54PM -0800, Vipin Sharma wrote:
> > Add 'node' variable in kvm_mmu_memory_cache{} to denote which NUMA node
> > this cache should allocate memory from. Default initialize to
> > NUMA_NO_NODE in all architectures.
> >
> > Signed-off-by: Vipin Sharma <vipinsh@google.com>
> > ---
> > [...]
> >
> > +     INIT_KVM_MMU_MEMORY_CACHE(&cache, NULL, NUMA_NO_NODE);
>
> This is not any better than setting cache.node = NUMA_NO_NODE directly.
> Yes, it's fewer lines of code, but it's harder to read (what does NULL
> mean here?), and every user of kvm_mmu_memory_cache still has to know to
> pass NUMA_NO_NODE.
>
> When I originally gave this suggestion, I intended to suggest that
> INIT_KVM_MMU_MEMORY_CACHE() provide just default initialization.
> Non-default initialization for gfp_zero, gfp_custom, kmem_cache, and
> node would remain as it is.
>
> Yes, this adds some more lines, but it keeps things readable, and it
> doesn't require every initialization site of kvm_mmu_memory_cache to
> know what to pass for gfp_zero, node, and kmem_cache. Each site only
> needs to set the fields *it* cares about.

And to offset the extra lines needed to call INIT_KVM_MMU_MEMORY_CACHE(),
we could finally invert the meaning of gfp_zero so that caches use
__GFP_ZERO by default. The majority of caches want __GFP_ZERO, so that
should cut down a bunch of lines.
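
For example (a sketch, building on the KVM_MMU_MEMORY_CACHE_INIT() idea
from my previous mail), the inversion could be as simple as having the
default initializer set the flag, so only the rare cache that must not
zero its objects touches gfp_zero at all:

#define KVM_MMU_MEMORY_CACHE_INIT() (struct kvm_mmu_memory_cache) {	\
	.gfp_zero = __GFP_ZERO,						\
	.node = NUMA_NO_NODE,						\
}

	/* Hypothetical opt-out for a cache that must not be zeroed: */
	cache.gfp_zero = 0;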

>
> [...]
Vipin Sharma Jan. 3, 2023, 5:36 p.m. UTC | #6
On Thu, Dec 29, 2022 at 10:22 AM Ben Gardon <bgardon@google.com> wrote:
>
> On Wed, Dec 28, 2022 at 2:08 PM Vipin Sharma <vipinsh@google.com> wrote:
> >
> > On Tue, Dec 27, 2022 at 11:10 AM Ben Gardon <bgardon@google.com> wrote:
> > >
> > > On Wed, Dec 21, 2022 at 6:35 PM Vipin Sharma <vipinsh@google.com> wrote:
> > > >
> > > > Add 'node' variable in kvm_mmu_memory_cache{} to denote which NUMA node
> > > > this cache should allocate memory from. Default initialize to
> > > > NUMA_NO_NODE in all architectures.
> > > >
> > > > Signed-off-by: Vipin Sharma <vipinsh@google.com>
> > > > ---
> > > > [...]
> > > >
> > > > diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
> > > > index a25e0b73ee70..b017c29a9340 100644
> > > > --- a/arch/mips/kvm/mips.c
> > > > +++ b/arch/mips/kvm/mips.c
> > > > @@ -304,6 +304,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
> > > >                      HRTIMER_MODE_REL);
> > > >         vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
> > > >
> > > > +       vcpu->arch.mmu_page_cache.node = NUMA_NO_NODE;
> > > > +
> > >
> > > It looks weird to have MIPS not using the initialization MACRO. Should
> > > it just have a GFP_ZERO parameter?
> >
> > MIPS is not setting GFP_ZERO explicitly before my series, so, I didn't
> > make it GFP_ZERO. I am not sure if MIPS needs it or not, I tried to
> > keep the same functionality in my patch.
> >
> > May be someone from MIPS can tell more about it.
>
> That makes sense, I just don't want to see MIPS get left behind
> because we move the cache init logic to a macro or function. Folks
> might update the init function but forget to update MIPS too.
>

Hi Huacai, Aleksandar,

I have noticed that MIPS doesn't use the __GFP_ZERO flag for
mmu_page_cache in KVM. Is that intentional? I was wondering whether it
would be useful to add the zero flag for this cache in MIPS as part of
this patch. All other architectures seem to use the __GFP_ZERO flag for
their caches.

Thanks
Vipin
Vipin Sharma Jan. 3, 2023, 6:45 p.m. UTC | #7
On Thu, Dec 29, 2022 at 3:12 PM David Matlack <dmatlack@google.com> wrote:
>
> On Thu, Dec 29, 2022 at 3:08 PM David Matlack <dmatlack@google.com> wrote:
> >
> > On Wed, Dec 21, 2022 at 06:34:54PM -0800, Vipin Sharma wrote:
> > > Add 'node' variable in kvm_mmu_memory_cache{} to denote which NUMA node
> > > this cache should allocate memory from. Default initialize to
> > > NUMA_NO_NODE in all architectures.
> > >
> > > Signed-off-by: Vipin Sharma <vipinsh@google.com>
> > > ---
> > >  arch/arm64/kvm/arm.c      |  2 +-
> > >  arch/arm64/kvm/mmu.c      |  4 +++-
> > >  arch/mips/kvm/mips.c      |  2 ++
> > >  arch/riscv/kvm/mmu.c      |  2 +-
> > >  arch/riscv/kvm/vcpu.c     |  2 +-
> > >  arch/x86/kvm/mmu/mmu.c    | 22 ++++++++++++----------
> > >  include/linux/kvm_host.h  |  6 ++++++
> > >  include/linux/kvm_types.h |  2 ++
> > >  8 files changed, 28 insertions(+), 14 deletions(-)
> > >
> > > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > > index 9c5573bc4614..52a41f4532e2 100644
> > > --- a/arch/arm64/kvm/arm.c
> > > +++ b/arch/arm64/kvm/arm.c
> > > @@ -340,7 +340,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
> > >       vcpu->arch.target = -1;
> > >       bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
> > >
> > > -     vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
> > > +     INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache, NULL, NUMA_NO_NODE);
> > >
> > >       /*
> > >        * Default value for the FP state, will be overloaded at load
> > > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> > > index 31d7fa4c7c14..bd07155e17fa 100644
> > > --- a/arch/arm64/kvm/mmu.c
> > > +++ b/arch/arm64/kvm/mmu.c
> > > @@ -894,12 +894,14 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
> > >  {
> > >       phys_addr_t addr;
> > >       int ret = 0;
> > > -     struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
> > > +     struct kvm_mmu_memory_cache cache;
> > >       struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
> > >       enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
> > >                                    KVM_PGTABLE_PROT_R |
> > >                                    (writable ? KVM_PGTABLE_PROT_W : 0);
> > >
> > > +     INIT_KVM_MMU_MEMORY_CACHE(&cache, NULL, NUMA_NO_NODE);
> >
> > This is not any better than setting cache.node = NUMA_NO_NODE directly.
> > Yes it's fewer lines of code, but it's harder to read (what does NULL
> > mean here?), and every user of kvm_mmu_memory_cache still has to know to
> > pass NUMA_NO_NODE.
> >
> > When I originally gave this suggestion, I intended to suggest that
> > INIT_KVM_MMU_MEMORY_CACHE() provide just default initialization.
> > Non-default initialization for gfp_zero, gfp_custom, kmem_cache, and
> > node would remain as they are.
> >
> > Yes this adds some more lines, but it keeps things readable, and doesn't
> > require every initialization site of kvm_mmu_memory_cache to know what to pass
> > for gfp_zero, node, and kmem_cache. It only needs to set the fields
> > *it* cares about.
>
> And to offset the extra lines to call INIT_KVM_MMU_MEMORY_CACHE(), we
> could finally invert the meaning of gfp_zero so that caches use
> __GFP_ZERO by default. The majority of caches want __GFP_ZERO, so that
> should cut down a bunch of lines.
>

Can you clarify what you mean by "invert"?

Caches that don't want __GFP_ZERO would explicitly set gfp_zero to 0.
Is that what you intend?


> >
> > Here's what I mean specifically, based on INIT_LIST_HEAD. I don't think
> > I got all the kvm_mmu_memory_cache users, but you get the point.
> >
> >
> > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > index 9c5573bc4614..0e138dcaf4d4 100644
> > --- a/arch/arm64/kvm/arm.c
> > +++ b/arch/arm64/kvm/arm.c
> > @@ -340,6 +340,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
> >         vcpu->arch.target = -1;
> >         bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
> >
> > +       INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache);
> >         vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
> >
> >         /*
> > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> > index 31d7fa4c7c14..f5fd78a4f084 100644
> > --- a/arch/arm64/kvm/mmu.c
> > +++ b/arch/arm64/kvm/mmu.c
> > @@ -894,12 +894,14 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
> >  {
> >         phys_addr_t addr;
> >         int ret = 0;
> > -       struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
> > +       KVM_MMU_MEMORY_CACHE(cache);
> >         struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
> >         enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
> >                                      KVM_PGTABLE_PROT_R |
> >                                      (writable ? KVM_PGTABLE_PROT_W : 0);
> >
> > +       cache.gfp_zero = __GFP_ZERO;
> > +
> >         if (is_protected_kvm_enabled())
> >                 return -EPERM;
> >
> > diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
> > index 34b57e0be2ef..7915a5a2d104 100644
> > --- a/arch/riscv/kvm/mmu.c
> > +++ b/arch/riscv/kvm/mmu.c
> > @@ -351,10 +351,11 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
> >         int ret = 0;
> >         unsigned long pfn;
> >         phys_addr_t addr, end;
> > -       struct kvm_mmu_memory_cache pcache = {
> > -               .gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
> > -               .gfp_zero = __GFP_ZERO,
> > -       };
> > +       KVM_MMU_MEMORY_CACHE(pcache);
> > +
> > +       pcache.gfp_zero = __GFP_ZERO;
> > +       if (in_atomic)
> > +               pcache.gfp_custom = GFP_ATOMIC | __GFP_ACCOUNT;
> >
> >         end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
> >         pfn = __phys_to_pfn(hpa);
> > diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
> > index 7c08567097f0..3d73ab3ec9a4 100644
> > --- a/arch/riscv/kvm/vcpu.c
> > +++ b/arch/riscv/kvm/vcpu.c
> > @@ -161,6 +161,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
> >
> >         /* Mark this VCPU never ran */
> >         vcpu->arch.ran_atleast_once = false;
> > +       INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache);
> >         vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
> >         bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
> >
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index 254bc46234e0..d4cd8e64cc03 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -5909,14 +5909,19 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
> >  {
> >         int ret;
> >
> > +       INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_pte_list_desc_cache);
> >         vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
> >         vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
> >
> > +       INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_header_cache);
> >         vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
> >         vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
> >
> > +       INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_shadow_page_cache);
> >         vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
> >
> > +       INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_shadowed_info_cache);
> > +
> >         vcpu->arch.mmu = &vcpu->arch.root_mmu;
> >         vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
> >
> > @@ -6083,11 +6088,14 @@ int kvm_mmu_init_vm(struct kvm *kvm)
> >         node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
> >         kvm_page_track_register_notifier(kvm, node);
> >
> > +       INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_page_header_cache);
> >         kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
> >         kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
> >
> > +       INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_shadow_page_cache);
> >         kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
> >
> > +       INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_desc_cache);
> >         kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
> >         kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
> >
> > diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
> > index 76de36e56cdf..eb7ff9afa5c7 100644
> > --- a/include/linux/kvm_types.h
> > +++ b/include/linux/kvm_types.h
> > @@ -98,6 +98,17 @@ struct kvm_mmu_memory_cache {
> >         int capacity;
> >         void **objects;
> >  };
> > +
> > +#define KVM_MMU_MEMORY_CACHE_INIT() (struct kvm_mmu_memory_cache) { \
> > +}
> > +
> > +#define KVM_MMU_MEMORY_CACHE(_name) \
> > +       struct kvm_mmu_memory_cache _name = KVM_MMU_MEMORY_CACHE_INIT()
> > +
> > +static inline void INIT_KVM_MMU_MEMORY_CACHE(struct kvm_mmu_memory_cache *cache)
> > +{
> > +       *cache = KVM_MMU_MEMORY_CACHE_INIT();
> > +}
> >  #endif
> >
> >  #define HALT_POLL_HIST_COUNT                   32
> >
> > > +
> > >       if (is_protected_kvm_enabled())
> > >               return -EPERM;
> > >
> > > diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
> > > index a25e0b73ee70..b017c29a9340 100644
> > > --- a/arch/mips/kvm/mips.c
> > > +++ b/arch/mips/kvm/mips.c
> > > @@ -304,6 +304,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
> > >                    HRTIMER_MODE_REL);
> > >       vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
> > >
> > > +     vcpu->arch.mmu_page_cache.node = NUMA_NO_NODE;
> > > +
> > >       /*
> > >        * Allocate space for host mode exception handlers that handle
> > >        * guest mode exits
> > > diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
> > > index 34b57e0be2ef..119de4520cc6 100644
> > > --- a/arch/riscv/kvm/mmu.c
> > > +++ b/arch/riscv/kvm/mmu.c
> > > @@ -353,9 +353,9 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
> > >       phys_addr_t addr, end;
> > >       struct kvm_mmu_memory_cache pcache = {
> > >               .gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
> > > -             .gfp_zero = __GFP_ZERO,
> > >       };
> > >
> > > +     INIT_KVM_MMU_MEMORY_CACHE(&pcache, NULL, NUMA_NO_NODE);
> > >       end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
> > >       pfn = __phys_to_pfn(hpa);
> > >
> > > diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
> > > index 7c08567097f0..189b14feb365 100644
> > > --- a/arch/riscv/kvm/vcpu.c
> > > +++ b/arch/riscv/kvm/vcpu.c
> > > @@ -161,7 +161,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
> > >
> > >       /* Mark this VCPU never ran */
> > >       vcpu->arch.ran_atleast_once = false;
> > > -     vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
> > > +     INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache, NULL, NUMA_NO_NODE);
> > >       bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
> > >
> > >       /* Setup ISA features available to VCPU */
> > > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > > index 6f6a10d7a871..23a3b82b2384 100644
> > > --- a/arch/x86/kvm/mmu/mmu.c
> > > +++ b/arch/x86/kvm/mmu/mmu.c
> > > @@ -5954,13 +5954,14 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
> > >  {
> > >       int ret;
> > >
> > > -     vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
> > > -     vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
> > > +     INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_pte_list_desc_cache,
> > > +                               pte_list_desc_cache, NUMA_NO_NODE);
> > >
> > > -     vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
> > > -     vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
> > > +     INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_header_cache,
> > > +                               mmu_page_header_cache, NUMA_NO_NODE);
> > >
> > > -     vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
> > > +     INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_shadow_page_cache,
> > > +                               NULL, NUMA_NO_NODE);
> > >       spin_lock_init(&vcpu->arch.mmu_shadow_page_cache_lock);
> > >
> > >       vcpu->arch.mmu = &vcpu->arch.root_mmu;
> > > @@ -6124,14 +6125,15 @@ int kvm_mmu_init_vm(struct kvm *kvm)
> > >       node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
> > >       kvm_page_track_register_notifier(kvm, node);
> > >
> > > -     kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
> > > -     kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
> > > +     INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_page_header_cache,
> > > +                               mmu_page_header_cache, NUMA_NO_NODE);
> > >
> > > -     kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
> > > +     INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_shadow_page_cache,
> > > +                               NULL, NUMA_NO_NODE);
> > >       spin_lock_init(&kvm->arch.split_shadow_page_cache_lock);
> > >
> > > -     kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
> > > -     kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
> > > +     INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_desc_cache,
> > > +                               pte_list_desc_cache, NUMA_NO_NODE);
> > >
> > >       return 0;
> > >  }
> > > diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> > > index a262e15ebd19..719687a37ef7 100644
> > > --- a/include/linux/kvm_host.h
> > > +++ b/include/linux/kvm_host.h
> > > @@ -2302,4 +2302,10 @@ static inline void kvm_account_pgtable_pages(void *virt, int nr)
> > >  /* Max number of entries allowed for each kvm dirty ring */
> > >  #define  KVM_DIRTY_RING_MAX_ENTRIES  65536
> > >
> > > +#define INIT_KVM_MMU_MEMORY_CACHE(_cache, _kmem_cache, _node) ({     \
> > > +     (_cache)->kmem_cache = _kmem_cache;                             \
> > > +     (_cache)->gfp_zero = __GFP_ZERO;                                \
> > > +     (_cache)->node = _node;                                         \
> > > +})
> > > +
> > >  #endif
> > > diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
> > > index 76de36e56cdf..9c70ce95e51f 100644
> > > --- a/include/linux/kvm_types.h
> > > +++ b/include/linux/kvm_types.h
> > > @@ -97,6 +97,8 @@ struct kvm_mmu_memory_cache {
> > >       struct kmem_cache *kmem_cache;
> > >       int capacity;
> > >       void **objects;
> > > +     /* Node on which memory should be allocated by default */
> > > +     int node;
> > >  };
> > >  #endif
> > >
> > > --
> > > 2.39.0.314.g84b9a713c41-goog
> > >
David Matlack Jan. 3, 2023, 6:55 p.m. UTC | #8
On Tue, Jan 3, 2023 at 10:46 AM Vipin Sharma <vipinsh@google.com> wrote:
>
> On Thu, Dec 29, 2022 at 3:12 PM David Matlack <dmatlack@google.com> wrote:
> >
> > On Thu, Dec 29, 2022 at 3:08 PM David Matlack <dmatlack@google.com> wrote:
> > >
> > > On Wed, Dec 21, 2022 at 06:34:54PM -0800, Vipin Sharma wrote:
> > > > Add 'node' variable in kvm_mmu_memory_cache{} to denote which NUMA node
> > > > this cache should allocate memory from. Default initialize to
> > > > NUMA_NO_NODE in all architectures.
> > > >
> > > > Signed-off-by: Vipin Sharma <vipinsh@google.com>
> > > > ---
> > > >  arch/arm64/kvm/arm.c      |  2 +-
> > > >  arch/arm64/kvm/mmu.c      |  4 +++-
> > > >  arch/mips/kvm/mips.c      |  2 ++
> > > >  arch/riscv/kvm/mmu.c      |  2 +-
> > > >  arch/riscv/kvm/vcpu.c     |  2 +-
> > > >  arch/x86/kvm/mmu/mmu.c    | 22 ++++++++++++----------
> > > >  include/linux/kvm_host.h  |  6 ++++++
> > > >  include/linux/kvm_types.h |  2 ++
> > > >  8 files changed, 28 insertions(+), 14 deletions(-)
> > > >
> > > > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > > > index 9c5573bc4614..52a41f4532e2 100644
> > > > --- a/arch/arm64/kvm/arm.c
> > > > +++ b/arch/arm64/kvm/arm.c
> > > > @@ -340,7 +340,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
> > > >       vcpu->arch.target = -1;
> > > >       bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
> > > >
> > > > -     vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
> > > > +     INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache, NULL, NUMA_NO_NODE);
> > > >
> > > >       /*
> > > >        * Default value for the FP state, will be overloaded at load
> > > > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> > > > index 31d7fa4c7c14..bd07155e17fa 100644
> > > > --- a/arch/arm64/kvm/mmu.c
> > > > +++ b/arch/arm64/kvm/mmu.c
> > > > @@ -894,12 +894,14 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
> > > >  {
> > > >       phys_addr_t addr;
> > > >       int ret = 0;
> > > > -     struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
> > > > +     struct kvm_mmu_memory_cache cache;
> > > >       struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
> > > >       enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
> > > >                                    KVM_PGTABLE_PROT_R |
> > > >                                    (writable ? KVM_PGTABLE_PROT_W : 0);
> > > >
> > > > +     INIT_KVM_MMU_MEMORY_CACHE(&cache, NULL, NUMA_NO_NODE);
> > >
> > > This is not any better than setting cache.node = NUMA_NO_NODE directly.
> > > Yes it's fewer lines of code, but it's harder to read (what does NULL
> > > mean here?), and every user of kvm_mmu_memory_cache still has to know to
> > > pass NUMA_NO_NODE.
> > >
> > > When I originally gave this suggestion, I intended to suggest that
> > > INIT_KVM_MMU_MEMORY_CACHE() provide just default initialization.
> > > Non-default initialization for gfp_zero, gfp_custom, kmem_cache, and
> > > node would remain as they are.
> > >
> > > Yes this adds some more lines, but it keeps things readable, and doesn't
> > > require every initialization site of kvm_mmu_memory_cache to know what to pass
> > > for gfp_zero, node, and kmem_cache. It only needs to set the fields
> > > *it* cares about.
> >
> > And to offset the extra lines to call INIT_KVM_MMU_MEMORY_CACHE(), we
> > could finally invert the meaning of gfp_zero so that caches use
> > __GFP_ZERO by default. The majority of caches want __GFP_ZERO, so that
> > should cut down a bunch of lines.
> >
>
> Can you clarify what you mean by "invert"?
>
> Caches that don't want __GFP_ZERO would explicitly set gfp_zero to 0.
> Is that what you intend?

When I wrote that comment I was thinking you could change `gfp_t
gfp_zero` to e.g. `bool skip_gfp_zero` so that the default-initialized
value (false/0) means "use __GFP_ZERO".

However, that's silly once we have INIT_KVM_MMU_MEMORY_CACHE(). We can
do what you suggest: set gfp_zero to __GFP_ZERO in
INIT_KVM_MMU_MEMORY_CACHE() and then explicitly set it to 0 in caches
that don't need __GFP_ZERO.
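
Roughly, and ignoring gfp_custom and the other fields (untested sketch):

	static inline void INIT_KVM_MMU_MEMORY_CACHE(struct kvm_mmu_memory_cache *cache)
	{
		/* Zero all fields, then apply the common defaults. */
		*cache = (struct kvm_mmu_memory_cache) {
			.gfp_zero = __GFP_ZERO, /* most caches want zeroed objects */
			.node = NUMA_NO_NODE,   /* 0 is a valid node id, so set explicitly */
		};
	}

and a cache that doesn't need zeroing would opt out explicitly after
the default init, e.g.:

	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_shadowed_info_cache);
	vcpu->arch.mmu_shadowed_info_cache.gfp_zero = 0;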

Patch

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 9c5573bc4614..52a41f4532e2 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -340,7 +340,7 @@  int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.target = -1;
 	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
-	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
+	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache, NULL, NUMA_NO_NODE);
 
 	/*
 	 * Default value for the FP state, will be overloaded at load
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 31d7fa4c7c14..bd07155e17fa 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -894,12 +894,14 @@  int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 {
 	phys_addr_t addr;
 	int ret = 0;
-	struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
+	struct kvm_mmu_memory_cache cache;
 	struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
 				     KVM_PGTABLE_PROT_R |
 				     (writable ? KVM_PGTABLE_PROT_W : 0);
 
+	INIT_KVM_MMU_MEMORY_CACHE(&cache, NULL, NUMA_NO_NODE);
+
 	if (is_protected_kvm_enabled())
 		return -EPERM;
 
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index a25e0b73ee70..b017c29a9340 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -304,6 +304,8 @@  int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 		     HRTIMER_MODE_REL);
 	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
 
+	vcpu->arch.mmu_page_cache.node = NUMA_NO_NODE;
+
 	/*
 	 * Allocate space for host mode exception handlers that handle
 	 * guest mode exits
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 34b57e0be2ef..119de4520cc6 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -353,9 +353,9 @@  int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
 	phys_addr_t addr, end;
 	struct kvm_mmu_memory_cache pcache = {
 		.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
-		.gfp_zero = __GFP_ZERO,
 	};
 
+	INIT_KVM_MMU_MEMORY_CACHE(&pcache, NULL, NUMA_NO_NODE);
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(hpa);
 
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 7c08567097f0..189b14feb365 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -161,7 +161,7 @@  int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
 	/* Mark this VCPU never ran */
 	vcpu->arch.ran_atleast_once = false;
-	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
+	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_cache, NULL, NUMA_NO_NODE);
 	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
 
 	/* Setup ISA features available to VCPU */
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6f6a10d7a871..23a3b82b2384 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5954,13 +5954,14 @@  int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
 	int ret;
 
-	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
-	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
+	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_pte_list_desc_cache,
+				  pte_list_desc_cache, NUMA_NO_NODE);
 
-	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
-	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
+	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_page_header_cache,
+				  mmu_page_header_cache, NUMA_NO_NODE);
 
-	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
+	INIT_KVM_MMU_MEMORY_CACHE(&vcpu->arch.mmu_shadow_page_cache,
+				  NULL, NUMA_NO_NODE);
 	spin_lock_init(&vcpu->arch.mmu_shadow_page_cache_lock);
 
 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
@@ -6124,14 +6125,15 @@  int kvm_mmu_init_vm(struct kvm *kvm)
 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
 	kvm_page_track_register_notifier(kvm, node);
 
-	kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
-	kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
+	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_page_header_cache,
+				  mmu_page_header_cache, NUMA_NO_NODE);
 
-	kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
+	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_shadow_page_cache,
+				  NULL, NUMA_NO_NODE);
 	spin_lock_init(&kvm->arch.split_shadow_page_cache_lock);
 
-	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
-	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
+	INIT_KVM_MMU_MEMORY_CACHE(&kvm->arch.split_desc_cache,
+				  pte_list_desc_cache, NUMA_NO_NODE);
 
 	return 0;
 }
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a262e15ebd19..719687a37ef7 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2302,4 +2302,10 @@  static inline void kvm_account_pgtable_pages(void *virt, int nr)
 /* Max number of entries allowed for each kvm dirty ring */
 #define  KVM_DIRTY_RING_MAX_ENTRIES  65536
 
+#define INIT_KVM_MMU_MEMORY_CACHE(_cache, _kmem_cache, _node) ({	\
+	(_cache)->kmem_cache = _kmem_cache;				\
+	(_cache)->gfp_zero = __GFP_ZERO;				\
+	(_cache)->node = _node;						\
+})
+
 #endif
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 76de36e56cdf..9c70ce95e51f 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -97,6 +97,8 @@  struct kvm_mmu_memory_cache {
 	struct kmem_cache *kmem_cache;
 	int capacity;
 	void **objects;
+	/* Node on which memory should be allocated by default */
+	int node;
 };
 #endif
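
For context: a later patch in this series is what actually consumes
'node' when refilling the cache. A minimal sketch of that consumption,
assuming the existing refill helper in virt/kvm/kvm_main.c is extended
(names and details may differ in the actual series):

	static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
						       gfp_t gfp_flags)
	{
		struct page *page;

		gfp_flags |= mc->gfp_zero;

		if (mc->kmem_cache)
			/*
			 * kmem_cache_alloc_node() falls back to a normal
			 * allocation when mc->node == NUMA_NO_NODE.
			 */
			return kmem_cache_alloc_node(mc->kmem_cache, gfp_flags,
						     mc->node);

		/* alloc_pages_node() treats NUMA_NO_NODE as "local node". */
		page = alloc_pages_node(mc->node, gfp_flags, 0);
		if (!page)
			return NULL;
		return page_address(page);
	}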