@@ -2066,8 +2066,9 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 	return zapped;
 }
 
-static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
-				    struct list_head *invalid_list)
+static int
+__kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+			   struct list_head *invalid_list)
 {
 	int ret;
 
@@ -2088,15 +2089,24 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 		ret++;
 		list_move(&sp->link, invalid_list);
 		kvm_mod_used_mmu_pages(kvm, -1);
-	} else {
+	} else
 		list_move(&sp->link, &kvm->arch.active_mmu_pages);
-		kvm_reload_remote_mmus(kvm);
-	}
 
 	sp->role.invalid = 1;
 	return ret;
 }
 
+static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+				    struct list_head *invalid_list)
+{
+	int ret = __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+
+	if (sp->root_count)
+		kvm_reload_remote_mmus(kvm);
+
+	return ret;
+}
+
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list)
 {

Then the new function __kvm_mmu_prepare_zap_page only zaps the shadow page
without requesting KVM_REQ_MMU_RELOAD. Later, we will use it to batch free
root shadow pages.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c |   20 +++++++++++++++-----
 1 files changed, 15 insertions(+), 5 deletions(-)
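
[Illustration only, not part of this patch: a minimal sketch of how a later
patch might batch-zap shadow pages on top of this split, requesting
KVM_REQ_MMU_RELOAD once for the whole batch instead of once per root page.
The helper name kvm_mmu_zap_all_batched() and the exact walk over
active_mmu_pages are assumptions made for this sketch; it relies on the
mmu.c internals already used in the diff above.]

/*
 * Sketch only: batch-zap every shadow page, then request the remote MMU
 * reload a single time.  __kvm_mmu_prepare_zap_page() no longer issues
 * KVM_REQ_MMU_RELOAD itself, which is what makes the batching possible.
 */
static void kvm_mmu_zap_all_batched(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		/* Roots already invalidated on an earlier pass stay put. */
		if (sp->role.invalid && sp->root_count)
			continue;

		/* Zap without a per-page KVM_REQ_MMU_RELOAD request. */
		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
			goto restart;
	}

	/* One reload request covers every root invalidated above. */
	kvm_reload_remote_mmus(kvm);

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}

[The point of the sketch is only that kvm_reload_remote_mmus() moves out of
the per-page path, so invalidating N root pages costs one reload request
rather than N.]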