@@ -4181,12 +4181,21 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	struct kvm_mmu_page *sp, *node;
 	LIST_HEAD(invalid_list);
 
+	might_sleep();
+
 	spin_lock(&kvm->mmu_lock);
 restart:
-	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
+	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
 		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
 			goto restart;
 
+		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+			kvm_mmu_commit_zap_page(kvm, &invalid_list);
+			cond_resched_lock(&kvm->mmu_lock);
+			goto restart;
+		}
+	}
+
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 	spin_unlock(&kvm->mmu_lock);
 }
kvm_mmu_zap_all is a slow path; break the mmu-lock if needed to avoid a
potential soft lockup.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c |   11 ++++++++++-
 1 files changed, 10 insertions(+), 1 deletions(-)
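For reference, with the hunk above applied, kvm_mmu_zap_all would read roughly
as below. This is a sketch reconstructed from the diff, not a verbatim copy of
the resulting tree, and the inline comments are added here for explanation
rather than taken from the patch.

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);

	/* cond_resched_lock() below may sleep, so assert a sleepable context. */
	might_sleep();

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
			goto restart;

		/*
		 * If rescheduling is due, or another CPU is spinning on
		 * mmu_lock, flush the pending zaps, drop and reacquire the
		 * lock, and restart the walk from the head of the list.
		 */
		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			kvm_mmu_commit_zap_page(kvm, &invalid_list);
			cond_resched_lock(&kvm->mmu_lock);
			goto restart;
		}
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}

Note that the pending invalid_list is committed before the lock break, so
prepared-but-uncommitted shadow pages are not left queued while other
mmu_lock users run.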