@@ -4207,14 +4207,18 @@ restart:
 	spin_unlock(&kvm->mmu_lock);
 }
 
+#define BATCH_ZAP_PAGES 10
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 	LIST_HEAD(invalid_list);
+	int batch = 0;
 
 restart:
 	list_for_each_entry_safe_reverse(sp, node,
 	      &kvm->arch.active_mmu_pages, link) {
+		int ret;
+
 		/*
 		 * No obsolete page exists before new created page since
 		 * active_mmu_pages is the FIFO list.
@@ -4252,10 +4256,16 @@ restart:
 		 * Need not flush tlb since we only zap the sp with invalid
 		 * generation number.
 		 */
-		if (cond_resched_lock(&kvm->mmu_lock))
+		if ((batch >= BATCH_ZAP_PAGES) &&
+		      cond_resched_lock(&kvm->mmu_lock)) {
+			batch = 0;
 			goto restart;
+		}
 
-		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
+		ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+		batch += ret;
+
+		if (ret)
 			goto restart;
 	}
 
Zap at least 10 pages before releasing mmu-lock to reduce the overhead
of repeatedly acquiring and releasing the lock.
[ It improves kernel building by 0.6% ~ 1%. ]

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c | 14 ++++++++++++--
 1 files changed, 12 insertions(+), 2 deletions(-)
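For readers outside the kernel tree, a minimal userspace sketch of the same
batching idea follows, with a pthread mutex standing in for mmu_lock. The
names (process_all_items, lock_is_contended, NR_ITEMS) are illustrative only,
not kernel or KVM APIs; the point is just that the worker offers to drop the
lock once per batch of BATCH_ZAP_PAGES items instead of on every iteration.

/*
 * Standalone sketch (not kernel code) of the batching pattern in this
 * patch: do at least BATCH_ZAP_PAGES units of work under the lock before
 * being willing to release it to waiters.
 */
#include <pthread.h>
#include <stdio.h>

#define BATCH_ZAP_PAGES 10
#define NR_ITEMS        1000

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int items[NR_ITEMS];

/* Placeholder for "someone else wants the lock" (cf. spin_needbreak()). */
static int lock_is_contended(void)
{
	return 1;	/* pretend there is always a waiter */
}

static void process_all_items(void)
{
	int batch = 0;
	int i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < NR_ITEMS; i++) {
		items[i] = 0;	/* the real work, done under the lock */
		batch++;

		/*
		 * Only after a full batch do we consider releasing the
		 * lock; this bounds how often waiters can preempt the
		 * walk and amortizes the cost of re-acquiring the lock.
		 */
		if (batch >= BATCH_ZAP_PAGES && lock_is_contended()) {
			batch = 0;
			pthread_mutex_unlock(&lock);
			/* waiters run here */
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	process_all_items();
	printf("processed %d items in batches of %d\n",
	       NR_ITEMS, BATCH_ZAP_PAGES);
	return 0;
}

Batching trades a slightly longer worst-case lock hold time for far fewer
unlock/relock round trips, which is the rationale given in the changelog
above for the measured kernel-build improvement.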