@@ -3011,22 +3011,9 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
void kvm_mmu_zap_all(struct kvm *kvm)
{
- struct kvm_mmu_page *sp, *node;
- LIST_HEAD(invalid_list);
- int ign;
-
write_lock(&kvm->mmu_lock);
-restart:
- list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
- if (WARN_ON(sp->role.invalid))
- continue;
- if (__kvm_shadow_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
- goto restart;
- if (cond_resched_rwlock_write(&kvm->mmu_lock))
- goto restart;
- }
-
- kvm_shadow_mmu_commit_zap_page(kvm, &invalid_list);
+ if (kvm_memslots_have_rmaps(kvm))
+ kvm_shadow_mmu_zap_all(kvm);
if (tdp_mmu_enabled)
kvm_tdp_mmu_zap_all(kvm);
@@ -3604,3 +3604,22 @@ bool kvm_shadow_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
return kvm_handle_gfn_range(kvm, range, kvm_test_age_rmap);
}
+
+void kvm_shadow_mmu_zap_all(struct kvm *kvm)
+{
+ struct kvm_mmu_page *sp, *node;
+ LIST_HEAD(invalid_list);
+ int ign;
+
+restart:
+ list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
+ if (WARN_ON(sp->role.invalid))
+ continue;
+ if (__kvm_shadow_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
+ goto restart;
+ if (cond_resched_rwlock_write(&kvm->mmu_lock))
+ goto restart;
+ }
+
+ kvm_shadow_mmu_commit_zap_page(kvm, &invalid_list);
+}
@@ -101,6 +101,8 @@ bool kvm_shadow_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_shadow_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_shadow_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+void kvm_shadow_mmu_zap_all(struct kvm *kvm);
+
/* Exports from paging_tmpl.h */
gpa_t paging32_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gpa_t vaddr, u64 access,
Move the Shadow MMU part of kvm_mmu_zap_all() into a helper function in
shadow_mmu.c. Also check kvm_memslots_have_rmaps() so the Shadow MMU
operation can be skipped entirely if it's not needed.

This could present an opportunity to move the TDP MMU portion of the
function under the MMU lock in read mode, but since zapping all paging
structures should be a very rare and thus not performance-sensitive
operation, it's not necessary.

Suggested-by: David Matlack <dmatlack@google.com>
Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c        | 17 ++---------------
 arch/x86/kvm/mmu/shadow_mmu.c | 19 +++++++++++++++++++
 arch/x86/kvm/mmu/shadow_mmu.h |  2 ++
 3 files changed, 23 insertions(+), 15 deletions(-)
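
For reference, this is roughly how kvm_mmu_zap_all() reads once the mmu.c
hunk above is applied. The trailing write_unlock() is outside the hunk's
context and is assumed from the surrounding function, so treat this as an
illustrative sketch rather than part of the patch:

void kvm_mmu_zap_all(struct kvm *kvm)
{
	write_lock(&kvm->mmu_lock);

	/* Zap Shadow MMU pages only if rmaps are in use. */
	if (kvm_memslots_have_rmaps(kvm))
		kvm_shadow_mmu_zap_all(kvm);

	/* The TDP MMU maintains its own roots; zap them separately. */
	if (tdp_mmu_enabled)
		kvm_tdp_mmu_zap_all(kvm);

	write_unlock(&kvm->mmu_lock);	/* assumed: unlock not shown in the hunk */
}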