| Message ID | 20241222193445.349800-2-pbonzini@redhat.com |
|---|---|
| State | New |
| Series | TDX MMU prep series part 1 |
On Sun, Dec 22, 2024 at 02:34:28PM -0500, Paolo Bonzini wrote:
> From: Rick Edgecombe <rick.p.edgecombe@intel.com>
>
> Prepare for a future TDX patch which asserts that atomic zapping
> (i.e. zapping with mmu_lock taken for read) don't operate on mirror roots.
> When tearing down a VM, all roots have to be zapped (including mirro

s/mirro/mirror

> roots once they're in place) so do that with the mmu_lock taken for werite.

s/werite/write

> kvm_mmu_uninit_tdp_mmu() is invoked either before or after executing any
> atomic operations on SPTEs by vCPU threads. Therefore, it will not impact
> vCPU threads performance if kvm_tdp_mmu_zap_invalidated_roots() acquires
> mmu_lock for write to zap invalid roots.
>
> Co-developed-by: Yan Zhao <yan.y.zhao@intel.com>
> Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
> Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
> Message-ID: <20240718211230.1492011-2-rick.p.edgecombe@intel.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/mmu/mmu.c     |  2 +-
>  arch/x86/kvm/mmu/tdp_mmu.c | 16 +++++++++++-----
>  arch/x86/kvm/mmu/tdp_mmu.h |  2 +-
>  3 files changed, 13 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 2401606db260..3f749fb5ec6c 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -6467,7 +6467,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
>           * lead to use-after-free.
>           */
>          if (tdp_mmu_enabled)
> -                kvm_tdp_mmu_zap_invalidated_roots(kvm);
> +                kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
>  }
>
>  void kvm_mmu_init_vm(struct kvm *kvm)
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 2f15e0e33903..1054ccd9b861 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -38,7 +38,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
>           * ultimately frees all roots.
>           */
>          kvm_tdp_mmu_invalidate_all_roots(kvm);
> -        kvm_tdp_mmu_zap_invalidated_roots(kvm);
> +        kvm_tdp_mmu_zap_invalidated_roots(kvm, false);
>
>          WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
>          WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
> @@ -883,11 +883,14 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
>   * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
>   * zap" completes.
>   */
> -void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
> +void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm, bool shared)
>  {
>          struct kvm_mmu_page *root;
>
> -        read_lock(&kvm->mmu_lock);
> +        if (shared)
> +                read_lock(&kvm->mmu_lock);
> +        else
> +                write_lock(&kvm->mmu_lock);
>
>          for_each_tdp_mmu_root_yield_safe(kvm, root) {
>                  if (!root->tdp_mmu_scheduled_root_to_zap)
> @@ -905,7 +908,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
>                   * that may be zapped, as such entries are associated with the
>                   * ASID on both VMX and SVM.
>                   */
> -                tdp_mmu_zap_root(kvm, root, true);
> +                tdp_mmu_zap_root(kvm, root, shared);
>
>                  /*
>                   * The referenced needs to be put *after* zapping the root, as
> @@ -915,7 +918,10 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
>                  kvm_tdp_mmu_put_root(kvm, root);
>          }
>
> -        read_unlock(&kvm->mmu_lock);
> +        if (shared)
> +                read_unlock(&kvm->mmu_lock);
> +        else
> +                write_unlock(&kvm->mmu_lock);
>  }
>
>  /*
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
> index f03ca0dd13d9..6d7cdc462f58 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.h
> +++ b/arch/x86/kvm/mmu/tdp_mmu.h
> @@ -23,7 +23,7 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
>  bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
>  void kvm_tdp_mmu_zap_all(struct kvm *kvm);
>  void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
> -void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
> +void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm, bool shared);
>
>  int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
>
> --
> 2.43.5
>
>
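The core of the change is that kvm_tdp_mmu_zap_invalidated_roots() now lets its caller choose whether the zap runs with mmu_lock held for read (shared, concurrent faults allowed) or for write (exclusive, as in VM teardown where no vCPU can race). The sketch below shows that shape as a standalone userspace program, assuming a pthread rwlock as a stand-in for kvm->mmu_lock; zap_one_root() and the fixed root count are made-up placeholders for illustration, not KVM code.

```c
/*
 * Minimal userspace analogue of the locking pattern this patch introduces.
 * NOT kernel code: pthread_rwlock_t stands in for kvm->mmu_lock, and
 * zap_one_root() is a hypothetical placeholder for tdp_mmu_zap_root().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Hypothetical stand-in for zapping a single invalidated root. */
static void zap_one_root(int root, bool shared)
{
	printf("zapping root %d under %s lock\n",
	       root, shared ? "shared (read)" : "exclusive (write)");
}

/*
 * Mirrors the shape of kvm_tdp_mmu_zap_invalidated_roots(kvm, shared):
 * the caller picks whether the walk happens under the lock taken for
 * read or for write; the body is the same either way.
 */
static void zap_invalidated_roots(bool shared)
{
	if (shared)
		pthread_rwlock_rdlock(&mmu_lock);
	else
		pthread_rwlock_wrlock(&mmu_lock);

	for (int root = 0; root < 3; root++)
		zap_one_root(root, shared);

	pthread_rwlock_unlock(&mmu_lock);
}

int main(void)
{
	zap_invalidated_roots(true);   /* "fast zap" path: lock held for read */
	zap_invalidated_roots(false);  /* teardown path: lock held for write */
	return 0;
}
```

In the patch itself, the teardown caller kvm_mmu_uninit_tdp_mmu() passes shared=false because no vCPU threads can be performing atomic SPTE operations at that point, while the "fast zap" caller kvm_mmu_zap_all_fast() keeps shared=true so vCPUs can continue faulting in pages on new roots.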
```diff
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 2401606db260..3f749fb5ec6c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6467,7 +6467,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
 	 * lead to use-after-free.
 	 */
 	if (tdp_mmu_enabled)
-		kvm_tdp_mmu_zap_invalidated_roots(kvm);
+		kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
 }
 
 void kvm_mmu_init_vm(struct kvm *kvm)
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 2f15e0e33903..1054ccd9b861 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -38,7 +38,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 	 * ultimately frees all roots.
 	 */
 	kvm_tdp_mmu_invalidate_all_roots(kvm);
-	kvm_tdp_mmu_zap_invalidated_roots(kvm);
+	kvm_tdp_mmu_zap_invalidated_roots(kvm, false);
 
 	WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
 	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
@@ -883,11 +883,14 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
  * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
  * zap" completes.
  */
-void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
+void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm, bool shared)
 {
 	struct kvm_mmu_page *root;
 
-	read_lock(&kvm->mmu_lock);
+	if (shared)
+		read_lock(&kvm->mmu_lock);
+	else
+		write_lock(&kvm->mmu_lock);
 
 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
 		if (!root->tdp_mmu_scheduled_root_to_zap)
@@ -905,7 +908,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 		 * that may be zapped, as such entries are associated with the
 		 * ASID on both VMX and SVM.
 		 */
-		tdp_mmu_zap_root(kvm, root, true);
+		tdp_mmu_zap_root(kvm, root, shared);
 
 		/*
 		 * The referenced needs to be put *after* zapping the root, as
@@ -915,7 +918,10 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 		kvm_tdp_mmu_put_root(kvm, root);
 	}
 
-	read_unlock(&kvm->mmu_lock);
+	if (shared)
+		read_unlock(&kvm->mmu_lock);
+	else
+		write_unlock(&kvm->mmu_lock);
 }
 
 /*
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index f03ca0dd13d9..6d7cdc462f58 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -23,7 +23,7 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
-void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
+void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm, bool shared);
 
 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
```