@@ -1466,7 +1466,7 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
 static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
 {
         ASSERT(is_empty_shadow_page(sp->spt));
-        hlist_del(&sp->hash_link);
+
         list_del(&sp->link);
         free_page((unsigned long)sp->spt);
         if (!sp->role.direct)
@@ -1655,7 +1655,8 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)                \
         for_each_gfn_sp(_kvm, _sp, _gfn)                               \
-                if ((_sp)->role.direct || (_sp)->role.invalid) {} else
+                if ((_sp)->role.direct ||                              \
+                    ((_sp)->role.invalid && WARN_ON(1))) {} else
 
 /* @sp->gfn should be write-protected at the call site */
 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
@@ -2074,6 +2075,9 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
                 unaccount_shadowed(kvm, sp->gfn);
         if (sp->unsync)
                 kvm_unlink_unsync_page(kvm, sp);
+
+        hlist_del_init(&sp->hash_link);
+
         if (!sp->root_count) {
                 /* Count self */
                 ret++;
Move the deletion of a shadow page from the hash list out of
kvm_mmu_commit_zap_page() and into kvm_mmu_prepare_zap_page(), so that
we can call kvm_mmu_commit_zap_page() once for multiple
kvm_mmu_prepare_zap_page() calls; this helps us avoid unnecessary TLB
flushes.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c |    8 ++++++--
 1 files changed, 6 insertions(+), 2 deletions(-)
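
For reference, a minimal sketch (not part of this patch) of the batching
pattern the change enables, modeled on existing callers such as
kvm_mmu_unprotect_page(); zap_gfn_example() is a hypothetical function
name, and kvm->mmu_lock is assumed to be held:

        /*
         * Hypothetical caller, for illustration only: queue the indirect
         * valid shadow pages found for @gfn, then commit the whole batch
         * at once.
         */
        static void zap_gfn_example(struct kvm *kvm, gfn_t gfn)
        {
                struct kvm_mmu_page *sp;
                LIST_HEAD(invalid_list);

                for_each_gfn_indirect_valid_sp(kvm, sp, gfn)
                        kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);

                /* one commit, and thus at most one TLB flush, for all pages */
                kvm_mmu_commit_zap_page(kvm, &invalid_list);
        }

With the hash-list removal done in kvm_mmu_prepare_zap_page(), pages
already queued on invalid_list can no longer be found by the hash walk,
which is presumably why the WARN_ON(1) added to
for_each_gfn_indirect_valid_sp() should never fire.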