@@ -894,6 +894,8 @@ static int is_empty_shadow_page(u64 *spt)
static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
ASSERT(is_empty_shadow_page(sp->spt));
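+ /* Unlink from the mmu page hash here; invalid pages stay hashed until freed. */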
+ hlist_del(&sp->hash_link);
list_del(&sp->link);
__free_page(virt_to_page(sp->spt));
__free_page(virt_to_page(sp->gfns));
@@ -1542,12 +1544,15 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
if (!sp->active_count) {
/* Count self */
ret++;
- hlist_del(&sp->hash_link);
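+ /* kvm_mmu_free_page() also removes sp from the mmu page hash. */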
kvm_mmu_free_page(kvm, sp);
} else {
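+ /* sp is still referenced: mark it invalid and defer the actual free. */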
sp->role.invalid = 1;
list_move(&sp->link, &kvm->arch.active_mmu_pages);
- kvm_reload_remote_mmus(kvm);
+ /* No need to reload mmu if it's an unsync page being zapped */
+ if (sp->role.level != PT_PAGE_TABLE_LEVEL)
+ kvm_reload_remote_mmus(kvm);
}
kvm_mmu_reset_last_pte_updated(kvm);
return ret;
@@ -1782,7 +1787,9 @@ static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
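+ /* Invalid pages remain hashed until freed, so skip them here. */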
- if (s->gfn != gfn || s->role.direct || s->unsync)
+ if (s->gfn != gfn || s->role.direct || s->unsync ||
+ s->role.invalid)
continue;
WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
__kvm_unsync_page(vcpu, s);
@@ -1807,7 +1814,8 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
if (s->role.level != PT_PAGE_TABLE_LEVEL)
return 1;
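+ /* Skip invalid pages; they are already zapped and cannot be unsynced. */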
- if (!need_unsync && !s->unsync) {
+ if (!need_unsync && !s->unsync && !s->role.invalid) {
if (!can_unsync || !oos_shadow)
return 1;
need_unsync = true;