[RFC,v5,08/38] KVM: arm64: Unlock memslots after stage 2 tables are freed

Message ID 20211117153842.302159-9-alexandru.elisei@arm.com (mailing list archive)
State New, archived
Series KVM: arm64: Add Statistical Profiling Extension (SPE) support

Commit Message

Alexandru Elisei Nov. 17, 2021, 3:38 p.m. UTC
Unpin the backing pages mapped at stage 2 after the stage 2 translation
tables are destroyed.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 arch/arm64/kvm/mmu.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

Comments

Reiji Watanabe March 18, 2022, 5:19 a.m. UTC | #1
Hi Alex,

On 11/17/21 7:38 AM, Alexandru Elisei wrote:
> Unpin the backing pages mapped at stage 2 after the stage 2 translation
> tables are destroyed.
> 
> Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
> ---
>   arch/arm64/kvm/mmu.c | 23 ++++++++++++++++++-----
>   1 file changed, 18 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index cd6f1bc7842d..072e2aba371f 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -1627,11 +1627,19 @@ int kvm_mmu_lock_memslot(struct kvm *kvm, u64 slot, u64 flags)
>   	return ret;
>   }
>   
> -static void unlock_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
> +static void __unlock_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
>   {
>   	bool writable = memslot->arch.flags & KVM_MEMSLOT_LOCK_WRITE;
>   	unsigned long npages = memslot->npages;
>   
> +	unpin_memslot_pages(memslot, writable);
> +	account_locked_vm(current->mm, npages, false);
> +
> +	memslot->arch.flags &= ~KVM_MEMSLOT_LOCK_MASK;
> +}
> +
> +static void unlock_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
> +{
>   	/*
>   	 * MMU maintenance operations aren't performed on an unlocked memslot.
>   	 * Unmap it from stage 2 so the abort handler performs the necessary
> @@ -1640,10 +1648,7 @@ static void unlock_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
>   	if (kvm_mmu_has_pending_ops(kvm))
>   		kvm_arch_flush_shadow_memslot(kvm, memslot);
>   
> -	unpin_memslot_pages(memslot, writable);
> -	account_locked_vm(current->mm, npages, false);
> -
> -	memslot->arch.flags &= ~KVM_MEMSLOT_LOCK_MASK;
> +	__unlock_memslot(kvm, memslot);
>   }
>   
>   int kvm_mmu_unlock_memslot(struct kvm *kvm, u64 slot, u64 flags)
> @@ -1951,7 +1956,15 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
>   
>   void kvm_arch_flush_shadow_all(struct kvm *kvm)
>   {
> +	struct kvm_memory_slot *memslot;
> +
>   	kvm_free_stage2_pgd(&kvm->arch.mmu);
> +
> +	kvm_for_each_memslot(memslot, kvm_memslots(kvm)) {
> +		if (!memslot_is_locked(memslot))
> +			continue;
> +		__unlock_memslot(kvm, memslot);
> +	}
>   }

Perhaps it might be useful to keep track of the number of locked memslots?
(That could also be used in the fix for kvm_mmu_unlock_memslot in patch 7.)
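For example (completely untested; the nr_memslots_locked field is made up
for this sketch, and would be incremented in kvm_mmu_lock_memslot() and
decremented in __unlock_memslot()):

/* nr_memslots_locked is a hypothetical field, not part of this series. */
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	struct kvm_memory_slot *memslot;

	kvm_free_stage2_pgd(&kvm->arch.mmu);

	kvm_for_each_memslot(memslot, kvm_memslots(kvm)) {
		/* Stop early once every locked memslot has been unlocked. */
		if (!kvm->arch.nr_memslots_locked)
			break;
		if (!memslot_is_locked(memslot))
			continue;
		__unlock_memslot(kvm, memslot);
	}
}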
                                                  
Thanks,
Reiji


>   
>   void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
Alexandru Elisei March 21, 2022, 5:29 p.m. UTC | #2
Hi,

On Thu, Mar 17, 2022 at 10:19:56PM -0700, Reiji Watanabe wrote:
> Hi Alex,
> 
> On 11/17/21 7:38 AM, Alexandru Elisei wrote:
> > Unpin the backing pages mapped at stage 2 after the stage 2 translation
> > tables are destroyed.
> > 
> > Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
> > ---
> >   arch/arm64/kvm/mmu.c | 23 ++++++++++++++++++-----
> >   1 file changed, 18 insertions(+), 5 deletions(-)
> > 
> > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> > index cd6f1bc7842d..072e2aba371f 100644
> > --- a/arch/arm64/kvm/mmu.c
> > +++ b/arch/arm64/kvm/mmu.c
> > @@ -1627,11 +1627,19 @@ int kvm_mmu_lock_memslot(struct kvm *kvm, u64 slot, u64 flags)
> >   	return ret;
> >   }
> > -static void unlock_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
> > +static void __unlock_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
> >   {
> >   	bool writable = memslot->arch.flags & KVM_MEMSLOT_LOCK_WRITE;
> >   	unsigned long npages = memslot->npages;
> > +	unpin_memslot_pages(memslot, writable);
> > +	account_locked_vm(current->mm, npages, false);
> > +
> > +	memslot->arch.flags &= ~KVM_MEMSLOT_LOCK_MASK;
> > +}
> > +
> > +static void unlock_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
> > +{
> >   	/*
> >   	 * MMU maintenance operations aren't performed on an unlocked memslot.
> >   	 * Unmap it from stage 2 so the abort handler performs the necessary
> > @@ -1640,10 +1648,7 @@ static void unlock_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
> >   	if (kvm_mmu_has_pending_ops(kvm))
> >   		kvm_arch_flush_shadow_memslot(kvm, memslot);
> > -	unpin_memslot_pages(memslot, writable);
> > -	account_locked_vm(current->mm, npages, false);
> > -
> > -	memslot->arch.flags &= ~KVM_MEMSLOT_LOCK_MASK;
> > +	__unlock_memslot(kvm, memslot);
> >   }
> >   int kvm_mmu_unlock_memslot(struct kvm *kvm, u64 slot, u64 flags)
> > @@ -1951,7 +1956,15 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
> >   void kvm_arch_flush_shadow_all(struct kvm *kvm)
> >   {
> > +	struct kvm_memory_slot *memslot;
> > +
> >   	kvm_free_stage2_pgd(&kvm->arch.mmu);
> > +
> > +	kvm_for_each_memslot(memslot, kvm_memslots(kvm)) {
> > +		if (!memslot_is_locked(memslot))
> > +			continue;
> > +		__unlock_memslot(kvm, memslot);
> > +	}
> >   }
> 
> Perhaps it might be useful to keep track of the number of locked memslots?
> (That could also be used in the fix for kvm_mmu_unlock_memslot in patch 7.)

I don't think keeping track of the number is very useful: we usually want to
find all the locked memslots, and there's no guarantee that they sit at the
start of the list, so at best the count saves iterating over the trailing
memslots.

In the case above, this happens when the VM is being destroyed, which is not
particularly performance sensitive, and a few extra linked list accesses
won't make a measurable difference.

In patch #7, KVM iterates through the memslots and calls
kvm_arch_flush_shadow_memslot(), which is several orders of magnitude slower
than iterating over a few extra memslots. Also, I don't expect userspace to
lock and then unlock a memslot before running any VCPUs very often.

Thanks,
Alex

Patch

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index cd6f1bc7842d..072e2aba371f 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1627,11 +1627,19 @@  int kvm_mmu_lock_memslot(struct kvm *kvm, u64 slot, u64 flags)
 	return ret;
 }
 
-static void unlock_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+static void __unlock_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 	bool writable = memslot->arch.flags & KVM_MEMSLOT_LOCK_WRITE;
 	unsigned long npages = memslot->npages;
 
+	unpin_memslot_pages(memslot, writable);
+	account_locked_vm(current->mm, npages, false);
+
+	memslot->arch.flags &= ~KVM_MEMSLOT_LOCK_MASK;
+}
+
+static void unlock_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
 	/*
 	 * MMU maintenance operations aren't performed on an unlocked memslot.
 	 * Unmap it from stage 2 so the abort handler performs the necessary
@@ -1640,10 +1648,7 @@  static void unlock_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
 	if (kvm_mmu_has_pending_ops(kvm))
 		kvm_arch_flush_shadow_memslot(kvm, memslot);
 
-	unpin_memslot_pages(memslot, writable);
-	account_locked_vm(current->mm, npages, false);
-
-	memslot->arch.flags &= ~KVM_MEMSLOT_LOCK_MASK;
+	__unlock_memslot(kvm, memslot);
 }
 
 int kvm_mmu_unlock_memslot(struct kvm *kvm, u64 slot, u64 flags)
@@ -1951,7 +1956,15 @@  void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
+	struct kvm_memory_slot *memslot;
+
 	kvm_free_stage2_pgd(&kvm->arch.mmu);
+
+	kvm_for_each_memslot(memslot, kvm_memslots(kvm)) {
+		if (!memslot_is_locked(memslot))
+			continue;
+		__unlock_memslot(kvm, memslot);
+	}
 }
 
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,