
[4/6] KVM: x86/mmu: Leverage vcpu->lru_slot_index for rmap_add and rmap_recycle

Message ID 20210730223707.4083785-5-dmatlack@google.com (mailing list archive)
State New, archived
Series Improve gfn-to-memslot performance during page faults

Commit Message

David Matlack July 30, 2021, 10:37 p.m. UTC
rmap_add() and rmap_recycle() both run in the context of a vCPU, so we
can use kvm_vcpu_gfn_to_memslot() to look up the memslot. This enables
rmap_add() and rmap_recycle() to take advantage of vcpu->lru_slot_index
and avoid an expensive memslot search.
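[Editor's note: for context, a minimal sketch of the kind of lookup this
relies on. It is illustrative only: the real kvm_vcpu_gfn_to_memslot()
differs in detail, and the per-vCPU caching added earlier in this series
lives in generic KVM code; the helper name below is hypothetical.]

/*
 * Sketch: a gfn-to-memslot lookup that consults a per-vCPU cached slot
 * index before falling back to the binary search over all memslots.
 */
static struct kvm_memory_slot *gfn_to_memslot_sketch(struct kvm_vcpu *vcpu,
						     gfn_t gfn)
{
	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
	struct kvm_memory_slot *slot = &slots->memslots[vcpu->lru_slot_index];

	/* Fast path: re-check the slot that served the last lookup. */
	if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
		return slot;

	/* Slow path: O(log n) binary search, then cache the result. */
	slot = search_memslots(slots, gfn);
	if (slot)
		vcpu->lru_slot_index = slot - slots->memslots;

	return slot;
}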

This change improves the performance of "Populate memory time" in
dirty_log_perf_test with tdp_mmu=N (-v sets the number of vCPUs, -x the
number of memslots the guest memory is split across). Beyond the raw
speedup, "Populate memory time" no longer scales with the number of
memslots in the VM: compare the -x1 and -x64 rows below.

Command                         | Before           | After
------------------------------- | ---------------- | -------------
./dirty_log_perf_test -v64 -x1  | 15.18001570s     | 14.99469366s
./dirty_log_perf_test -v64 -x64 | 18.71336392s     | 14.98675076s

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 35 ++++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)

Comments

Paolo Bonzini Aug. 2, 2021, 2:58 p.m. UTC | #1
On 31/07/21 00:37, David Matlack wrote:
> rmap_add() and rmap_recycle() both run in the context of a vCPU, so we
> can use kvm_vcpu_gfn_to_memslot() to look up the memslot. This enables
> rmap_add() and rmap_recycle() to take advantage of vcpu->lru_slot_index
> and avoid an expensive memslot search.
> 
> This change improves the performance of "Populate memory time" in
> dirty_log_perf_test with tdp_mmu=N (-v sets the number of vCPUs, -x the
> number of memslots the guest memory is split across). Beyond the raw
> speedup, "Populate memory time" no longer scales with the number of
> memslots in the VM: compare the -x1 and -x64 rows below.
> 
> Command                         | Before           | After
> ------------------------------- | ---------------- | -------------
> ./dirty_log_perf_test -v64 -x1  | 15.18001570s     | 14.99469366s
> ./dirty_log_perf_test -v64 -x64 | 18.71336392s     | 14.98675076s
> 
> Signed-off-by: David Matlack <dmatlack@google.com>

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>

> [...]

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a8cdfd8d45c4..370a6ebc2ede 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1043,17 +1043,6 @@ static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
 	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
 }
 
-static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
-					 struct kvm_mmu_page *sp)
-{
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *slot;
-
-	slots = kvm_memslots_for_spte_role(kvm, sp->role);
-	slot = __gfn_to_memslot(slots, gfn);
-	return __gfn_to_rmap(gfn, sp->role.level, slot);
-}
-
 static bool rmap_can_add(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_memory_cache *mc;
@@ -1064,24 +1053,39 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
 
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
+	struct kvm_memory_slot *slot;
 	struct kvm_mmu_page *sp;
 	struct kvm_rmap_head *rmap_head;
 
 	sp = sptep_to_sp(spte);
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
 	return pte_list_add(vcpu, spte, rmap_head);
 }
 
+
 static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *slot;
 	struct kvm_mmu_page *sp;
 	gfn_t gfn;
 	struct kvm_rmap_head *rmap_head;
 
 	sp = sptep_to_sp(spte);
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
-	rmap_head = gfn_to_rmap(kvm, gfn, sp);
+
+	/*
+	 * Unlike rmap_add and rmap_recycle, rmap_remove does not run in the
+	 * context of a vCPU, so it has to determine which memslots to use
+	 * based on context information in sp->role.
+	 */
+	slots = kvm_memslots_for_spte_role(kvm, sp->role);
+
+	slot = __gfn_to_memslot(slots, gfn);
+	rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
+
 	__pte_list_remove(spte, rmap_head);
 }
 
@@ -1628,12 +1632,13 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 
 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
+	struct kvm_memory_slot *slot;
 	struct kvm_rmap_head *rmap_head;
 	struct kvm_mmu_page *sp;
 
 	sp = sptep_to_sp(spte);
-
-	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
 
 	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
 	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
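
[Editor's note: for reference on the comment added in rmap_remove():
without a vCPU on hand, the memslot set (SMM vs. regular address space)
must be derived from the shadow page's role bits. To the best of my
recollection, kvm_memslots_for_spte_role() amounts to the following;
treat it as a sketch of the idea rather than a verbatim quote of the
kernel header.]

/*
 * Sketch: pick the memslot set based on the shadow page role. SMM has
 * its own address space in KVM, and therefore its own set of memslots.
 */
static inline struct kvm_memslots *
kvm_memslots_for_spte_role(struct kvm *kvm, union kvm_mmu_page_role role)
{
	return __kvm_memslots(kvm, role.smm);
}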