@@ -5612,13 +5612,19 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
write_lock(&kvm->mmu_lock);
flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
- if (is_tdp_mmu_enabled(kvm))
- flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
-
if (flush)
kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
-
write_unlock(&kvm->mmu_lock);
+
+ if (is_tdp_mmu_enabled(kvm)) {
+ flush = false;
+
+ read_lock(&kvm->mmu_lock);
+ flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
+ if (flush)
+ kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+ read_unlock(&kvm->mmu_lock);
+ }
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
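For readers following the locking change in the hunk above: the legacy MMU
pass keeps the exclusive write lock, while the TDP MMU pass moves under the
shared read lock with its own flush accounting. Below is a minimal userspace
sketch of that two-pass shape, using a pthread rwlock and hypothetical
demo_* stand-ins rather than any kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t demo_mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-ins for slot_handle_leaf() and kvm_tdp_mmu_zap_collapsible_sptes(). */
static bool demo_legacy_zap(void)
{
	return true;	/* pretend something was zapped */
}

static bool demo_tdp_zap(bool flush)
{
	(void)flush;
	return true;	/* pretend something was zapped */
}

static void demo_flush_tlbs(const char *pass)
{
	printf("TLB flush after %s pass\n", pass);
}

static void demo_zap_collapsible_sptes(void)
{
	bool flush;

	/* Exclusive pass: the legacy MMU still requires the write lock. */
	pthread_rwlock_wrlock(&demo_mmu_lock);
	flush = demo_legacy_zap();
	if (flush)
		demo_flush_tlbs("write-locked");
	pthread_rwlock_unlock(&demo_mmu_lock);

	/*
	 * Shared pass: walkers run concurrently under the read lock and
	 * serialize individual updates with atomic cmpxchg instead of the
	 * big lock. flush starts over because the earlier pass already
	 * flushed everything it zapped.
	 */
	flush = false;
	pthread_rwlock_rdlock(&demo_mmu_lock);
	flush = demo_tdp_zap(flush);
	if (flush)
		demo_flush_tlbs("read-locked");
	pthread_rwlock_unlock(&demo_mmu_lock);
}

int main(void)
{
	demo_zap_collapsible_sptes();
	return 0;
}

The detail worth noticing, mirrored from the patch, is that flush is reset
before the read-locked pass because the write-locked pass has already
flushed the TLBs for anything it zapped.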
@@ -1323,7 +1323,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
rcu_read_lock();

tdp_root_for_each_pte(iter, root, start, end) {
- if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
+retry:
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
flush = false;
continue;
}
@@ -1338,8 +1339,14 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
pfn, PG_LEVEL_NUM))
continue;

- tdp_mmu_set_spte(kvm, &iter, 0);
-
+ if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
+ /*
+ * The iter must explicitly re-read the SPTE because
+ * the atomic cmpxchg failed.
+ */
+ iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
+ goto retry;
+ }
flush = true;
}
@@ -1358,7 +1365,9 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
{
struct kvm_mmu_page *root;

- for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, false)
+ lockdep_assert_held_read(&kvm->mmu_lock);
+
+ for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
flush = zap_collapsible_spte_range(kvm, root, slot, flush);

return flush;
To speed the process of disabling dirty logging, change the TDP MMU
function which zaps collapsible SPTEs to run under the MMU read lock.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c     | 14 ++++++++++----
 arch/x86/kvm/mmu/tdp_mmu.c | 17 +++++++++++++----
 2 files changed, 23 insertions(+), 8 deletions(-)
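A final note on the shared=true arguments threaded through
tdp_mmu_iter_cond_resched() and for_each_tdp_mmu_root_yield_safe() in the
hunks above: yielding while holding a shared lock means dropping the read
lock, rescheduling, reacquiring it, and restarting from freshly read state.
A hypothetical userspace sketch of that shape (the demo_* names are
illustrative, not the kernel's API):

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_rwlock_t demo_mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Rough analogue of tdp_mmu_iter_cond_resched(..., shared=true). */
static bool demo_cond_resched_read(bool need_resched)
{
	if (!need_resched)
		return false;

	pthread_rwlock_unlock(&demo_mmu_lock);
	sched_yield();				/* let writers or vCPUs run */
	pthread_rwlock_rdlock(&demo_mmu_lock);
	return true;				/* caller must re-read state */
}

int main(void)
{
	pthread_rwlock_rdlock(&demo_mmu_lock);
	if (demo_cond_resched_read(true)) {
		/* any cached state may be stale; restart the walk here */
	}
	pthread_rwlock_unlock(&demo_mmu_lock);
	return 0;
}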