@@ -4233,13 +4233,13 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
spin_unlock(&kvm->mmu_lock);
}
-#define BATCH_ZAP_PAGES 10
+#define BATCH_ZAP_PAGES 0
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
struct kvm_mmu_page *sp, *node;
LIST_HEAD(invalid_list);
int batch = 0;
-
+ int num = 0;
restart:
list_for_each_entry_safe_reverse(sp, node,
&kvm->arch.active_mmu_pages, link) {
@@ -4265,6 +4265,7 @@ restart:
if (batch >= BATCH_ZAP_PAGES &&
cond_resched_lock(&kvm->mmu_lock)) {
batch = 0;
+ num++;
goto restart;
}
@@ -4277,6 +4278,7 @@ restart:
* may use the pages.
*/
kvm_mmu_commit_zap_page(kvm, &invalid_list);
+ printk("lock-break: %d.\n", num);
}
/*
@@ -4290,7 +4292,12 @@ restart:
*/
void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
{
+ u64 start;
+
spin_lock(&kvm->mmu_lock);
+
+ start = local_clock();
+
trace_kvm_mmu_invalidate_zap_all_pages(kvm);
kvm->arch.mmu_valid_gen++;
@@ -4306,6 +4313,9 @@ void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
kvm_reload_remote_mmus(kvm);
kvm_zap_obsolete_pages(kvm);
+
+ printk("%s: %llx.\n", __FUNCTION__, local_clock() - start);
+
spin_unlock(&kvm->mmu_lock);
}
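
For context on what the instrumentation above reports: with BATCH_ZAP_PAGES dropped to 0, the batch >= BATCH_ZAP_PAGES test is satisfied on every iteration, so the loop offers to drop mmu_lock (and restart the list walk) after every zapped page; num counts how many times cond_resched_lock() actually gave the lock up, and the final printk reports the local_clock() delta (nanoseconds, printed in hex by %llx) spent inside kvm_mmu_invalidate_zap_all_pages(). The user-space model below is only a sketch of that trade-off, not kernel code: zap_all(), NR_PAGES and CONTENTION are made-up names/numbers standing in for the shadow-page count and for how often cond_resched_lock() really yields.

/*
 * Toy user-space model of the batched lock-break in kvm_zap_obsolete_pages().
 * NOT kernel code: lock contention is simulated with a fixed probability
 * (CONTENTION) and NR_PAGES is an arbitrary page count for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_PAGES	10000	/* assumed number of obsolete shadow pages */
#define CONTENTION	0.3	/* assumed chance a lock break is actually taken */

static int zap_all(int batch_zap_pages)
{
	int batch = 0, zapped = 0, lock_breaks = 0;

	while (zapped < NR_PAGES) {
		/* mirrors: if (batch >= BATCH_ZAP_PAGES && cond_resched_lock(...)) */
		if (batch >= batch_zap_pages &&
		    (double)rand() / RAND_MAX < CONTENTION) {
			batch = 0;
			lock_breaks++;	/* what num++ counts in the patch */
			continue;	/* like goto restart: already-zapped pages
					 * are off the list, so progress is kept */
		}
		zapped++;		/* one page zapped, as in the real loop */
		batch++;
	}
	return lock_breaks;
}

int main(void)
{
	srand(1);
	printf("BATCH_ZAP_PAGES=10: lock-breaks ~ %d\n", zap_all(10));
	printf("BATCH_ZAP_PAGES=0:  lock-breaks ~ %d\n", zap_all(0));
	return 0;
}

Running the model shows the expected shape: with the batch size forced to 0 the walk gives up the lock several times more often than with a batch of 10, which is exactly the difference the lock-break count and the timing printk are meant to expose.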