
[v3,9/9] KVM: x86/mmu: Reduce default cache size in KVM from 40 to PT64_ROOT_MAX_LEVEL

Message ID 20221222023457.1764-10-vipinsh@google.com (mailing list archive)
State New, archived
Series NUMA aware page table's pages allocation

Commit Message

Vipin Sharma Dec. 22, 2022, 2:34 a.m. UTC
KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE is set to 40 without any specific
reason. Reduce the default size to PT64_ROOT_MAX_LEVEL, which is
currently 5.

Change the mmu_pte_list_desc_cache size to what is actually needed,
which is more than 5 but far less than 40.

Tested by running dirty_log_perf_test on both the TDP and shadow MMU
with 48 vCPUs and 2GB per vCPU on a 2 NUMA node machine. No performance
impact was noticed.

Ran perf on dirty_log_perf_test and found that kvm_mmu_get_free_page()
calls were reduced by ~3300, which is close to 48 (vCPUs) * 2 (nodes) *
35 (reduction in cache size).

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/x86/include/asm/kvm_types.h | 2 +-
 arch/x86/kvm/mmu/mmu.c           | 7 ++++---
 2 files changed, 5 insertions(+), 4 deletions(-)
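
[Editor's note: the ~3300 figure lines up with a simple back-of-the-envelope
calculation. The snippet below is an illustration only, not part of the patch;
it assumes one cache per vCPU per NUMA node, as described in this series.]

	/* Expected drop in page allocations from shrinking the caches. */
	#include <stdio.h>

	int main(void)
	{
		int vcpus = 48, nodes = 2;
		int old_cap = 40, new_cap = 5;	/* PT64_ROOT_MAX_LEVEL */

		/* 48 * 2 * 35 = 3360, close to the ~3300 drop seen under perf. */
		printf("expected fewer allocations: %d\n",
		       vcpus * nodes * (old_cap - new_cap));
		return 0;
	}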

Comments

Ben Gardon Dec. 27, 2022, 7:52 p.m. UTC | #1
On Wed, Dec 21, 2022 at 6:35 PM Vipin Sharma <vipinsh@google.com> wrote:
>
> KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE is set to 40 without any specific
> reason. Reduce default size to PT64_ROOT_MAX_LEVEL, which is currently
> 5.
>
> Change mmu_pte_list_desc_cache size to what is needed as it is more than
> 5 but way less than 40.

Why do you say more than 5? At least to resolve a page fault, we'll
never need more than 4 pages on a system with 5-level paging since the
root is already allocated.

>
> Tested by running dirty_log_perf_test on both tdp and shadow MMU with 48
> vcpu and 2GB/vcpu size on a 2 NUMA node machine. No impact on
> performance noticed.
>
> Ran perf on dirty_log_perf_test and found kvm_mmu_get_free_page() calls
> reduced by ~3300 which is near to 48 (vcpus) * 2 (nodes) * 35 (cache
> size).
>
> Signed-off-by: Vipin Sharma <vipinsh@google.com>
> ---
>  arch/x86/include/asm/kvm_types.h | 2 +-
>  arch/x86/kvm/mmu/mmu.c           | 7 ++++---
>  2 files changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_types.h b/arch/x86/include/asm/kvm_types.h
> index 08f1b57d3b62..752dab218a62 100644
> --- a/arch/x86/include/asm/kvm_types.h
> +++ b/arch/x86/include/asm/kvm_types.h
> @@ -2,6 +2,6 @@
>  #ifndef _ASM_X86_KVM_TYPES_H
>  #define _ASM_X86_KVM_TYPES_H
>
> -#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
> +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE PT64_ROOT_MAX_LEVEL

Please add a comment explaining why this value was chosen.

>
>  #endif /* _ASM_X86_KVM_TYPES_H */
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 7454bfc49a51..f89d933ff380 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -677,11 +677,12 @@ static int mmu_topup_sp_memory_cache(struct kvm_mmu_memory_cache *cache,
>
>  static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
>  {
> -       int r, nid;
> +       int r, nid, desc_capacity;
>
>         /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
> -       r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
> -                                      1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
> +       desc_capacity = 1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM;
> +       r = __kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
> +                                        desc_capacity, desc_capacity);
>         if (r)
>                 return r;
>
> --
> 2.39.0.314.g84b9a713c41-goog
>
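
[Editor's note: Ben's point can be made concrete with a small sketch, an
illustration only and not code from this series: with the root already
allocated when the MMU is loaded, a single page fault can require at most
one new page-table page per remaining level.]

	/* Upper bound on new shadow pages needed to resolve one fault. */
	#define PT64_ROOT_MAX_LEVEL 5	/* matches the kernel's definition */

	static inline int max_new_pages_per_fault(void)
	{
		/* The root exists, so only the lower levels may need a page: 4. */
		return PT64_ROOT_MAX_LEVEL - 1;
	}
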
Vipin Sharma Dec. 28, 2022, 10:08 p.m. UTC | #2
On Tue, Dec 27, 2022 at 11:52 AM Ben Gardon <bgardon@google.com> wrote:
>
> On Wed, Dec 21, 2022 at 6:35 PM Vipin Sharma <vipinsh@google.com> wrote:
> >
> > KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE is set to 40 without any specific
> > reason. Reduce default size to PT64_ROOT_MAX_LEVEL, which is currently
> > 5.
> >
> > Change mmu_pte_list_desc_cache size to what is needed as it is more than
> > 5 but way less than 40.
>
> Why do you say more than 5? At least to resolve a page fault we'll
> never need more than 4 pages on a system with 5 level paging since the
> root is already allocated.

Because of the comment in the code:
> >         /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
> > -       r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
> > -                                      1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);

>
> >
> > Tested by running dirty_log_perf_test on both tdp and shadow MMU with 48
> > vcpu and 2GB/vcpu size on a 2 NUMA node machine. No impact on
> > performance noticed.
> >
> > Ran perf on dirty_log_perf_test and found kvm_mmu_get_free_page() calls
> > reduced by ~3300 which is near to 48 (vcpus) * 2 (nodes) * 35 (cache
> > size).
> >
> > Signed-off-by: Vipin Sharma <vipinsh@google.com>
> > ---
> >  arch/x86/include/asm/kvm_types.h | 2 +-
> >  arch/x86/kvm/mmu/mmu.c           | 7 ++++---
> >  2 files changed, 5 insertions(+), 4 deletions(-)
> >
> > diff --git a/arch/x86/include/asm/kvm_types.h b/arch/x86/include/asm/kvm_types.h
> > index 08f1b57d3b62..752dab218a62 100644
> > --- a/arch/x86/include/asm/kvm_types.h
> > +++ b/arch/x86/include/asm/kvm_types.h
> > @@ -2,6 +2,6 @@
> >  #ifndef _ASM_X86_KVM_TYPES_H
> >  #define _ASM_X86_KVM_TYPES_H
> >
> > -#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
> > +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE PT64_ROOT_MAX_LEVEL
>
> Please add a comment explaining why this value was chosen.

Okay


>
> >
> >  #endif /* _ASM_X86_KVM_TYPES_H */
> > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> > index 7454bfc49a51..f89d933ff380 100644
> > --- a/arch/x86/kvm/mmu/mmu.c
> > +++ b/arch/x86/kvm/mmu/mmu.c
> > @@ -677,11 +677,12 @@ static int mmu_topup_sp_memory_cache(struct kvm_mmu_memory_cache *cache,
> >
> >  static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
> >  {
> > -       int r, nid;
> > +       int r, nid, desc_capacity;
> >
> >         /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
> > -       r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
> > -                                      1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
> > +       desc_capacity = 1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM;
> > +       r = __kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
> > +                                        desc_capacity, desc_capacity);
> >         if (r)
> >                 return r;
> >
> > --
> > 2.39.0.314.g84b9a713c41-goog
> >
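
[Editor's note: to put numbers on the exchange above, here is a sketch that
assumes PTE_PREFETCH_NUM is 8, its value in current mmu.c: the pte_list_desc
cache capacity works out to well over 5 but far below 40.]

	/* Sizing of mmu_pte_list_desc_cache as discussed above. */
	#define PT64_ROOT_MAX_LEVEL	5
	#define PTE_PREFETCH_NUM	8	/* assumption: current mmu.c value */

	/* 1 rmap + 1 parent PTE per level + prefetched rmaps = 14. */
	static const int desc_capacity =
		1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM;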

Patch

diff --git a/arch/x86/include/asm/kvm_types.h b/arch/x86/include/asm/kvm_types.h
index 08f1b57d3b62..752dab218a62 100644
--- a/arch/x86/include/asm/kvm_types.h
+++ b/arch/x86/include/asm/kvm_types.h
@@ -2,6 +2,6 @@ 
 #ifndef _ASM_X86_KVM_TYPES_H
 #define _ASM_X86_KVM_TYPES_H
 
-#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE PT64_ROOT_MAX_LEVEL
 
 #endif /* _ASM_X86_KVM_TYPES_H */
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7454bfc49a51..f89d933ff380 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -677,11 +677,12 @@  static int mmu_topup_sp_memory_cache(struct kvm_mmu_memory_cache *cache,
 
 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 {
-	int r, nid;
+	int r, nid, desc_capacity;
 
 	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
-	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
-				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
+	desc_capacity = 1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM;
+	r = __kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
+					 desc_capacity, desc_capacity);
 	if (r)
 		return r;
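
[Editor's note: one possible shape for the comment Ben asked for on the new
default, an illustration only and not wording from this series.]

	/*
	 * Size the default per-vCPU caches for a single page-table walk:
	 * at most one page per level of the deepest supported paging mode.
	 */
	#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE PT64_ROOT_MAX_LEVEL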