@@ -996,7 +996,6 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	ASSERT(is_empty_shadow_page(sp->spt));
 	hlist_del(&sp->hash_link);
-	list_del(&sp->link);
 	__free_page(virt_to_page(sp->spt));
 	if (!sp->role.direct)
 		__free_page(virt_to_page(sp->gfns));
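With list_del() gone, kvm_mmu_free_page() only releases memory and no
longer assumes the page is on any per-VM list; unlinking becomes the
caller's job, done in the commit path below. A minimal userspace sketch
of that ownership rule (shadow_page, free_shadow_page and the list node
are hypothetical stand-ins, not the kernel API):

#include <assert.h>
#include <stdlib.h>

struct list_node { struct list_node *prev, *next; };

struct shadow_page {
	struct list_node link;	/* owned by whoever queued the page */
	void *spt;		/* simplified shadow-page payload */
};

static int on_a_list(const struct list_node *n)
{
	return n->next != n;	/* self-linked means detached */
}

/* The free path no longer touches sp->link: unlink before calling. */
static void free_shadow_page(struct shadow_page *sp)
{
	assert(!on_a_list(&sp->link));
	free(sp->spt);
	free(sp);
}

int main(void)
{
	struct shadow_page *sp = malloc(sizeof(*sp));

	sp->link.prev = sp->link.next = &sp->link;	/* detached */
	sp->spt = malloc(4096);
	free_shadow_page(sp);
	return 0;
}
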
@@ -1681,9 +1680,8 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 	kvm_mmu_unlink_parents(kvm, sp);
 	if (!sp->role.invalid && !sp->role.direct)
 		unaccount_shadowed(kvm, sp->gfn);
-	if (sp->unsync)
-		kvm_unlink_unsync_page(kvm, sp);
-	if (!sp->active_count) {
+
+	if (!sp->active_count || sp->unsync) {
 		/* Count self */
 		ret++;
 		list_move(&sp->link, invalid_list);
@@ -1692,6 +1690,8 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 		kvm_reload_remote_mmus(kvm);
 	}
 
+	if (sp->unsync)
+		kvm_unlink_unsync_page(kvm, sp);
 	sp->role.invalid = 1;
 	kvm_mmu_reset_last_pte_updated(kvm);
 	return ret;
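
The ordering within kvm_mmu_prepare_zap_page() matters here: the new
branch reads sp->unsync to decide whether a still-referenced page may be
queued, so the unsync unlink has to run after that test. Assuming
kvm_unlink_unsync_page() clears sp->unsync, as mainline's helper does,
the old placement would hide the flag from the new condition. A reduced
sketch of that hazard (plain C, hypothetical names):

#include <stdbool.h>
#include <stdio.h>

struct page { int active_count; bool unsync; };

/* Clears the flag, as kvm_unlink_unsync_page() does. */
static void unlink_unsync(struct page *p) { p->unsync = false; }

static bool may_queue(const struct page *p)
{
	return !p->active_count || p->unsync;
}

int main(void)
{
	struct page p = { .active_count = 1, .unsync = true };
	struct page q = p;

	/* Unlink before the test (the old placement): the new
	 * condition never sees the flag, page never queued. */
	unlink_unsync(&q);
	printf("unlink first: queue=%d\n", may_queue(&q));	/* 0 */

	/* New placement: decide first, then do the bookkeeping. */
	printf("decide first: queue=%d\n", may_queue(&p));	/* 1 */
	unlink_unsync(&p);
	return 0;
}
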
@@ -1709,8 +1709,12 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 
 	do {
 		sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
-		WARN_ON(!sp->role.invalid || sp->active_count);
-		kvm_mmu_free_page(kvm, sp);
+		WARN_ON(!sp->role.invalid);
+		list_del(&sp->link);
+		if (sp->active_count)
+			WARN_ON(!sp->unsync);
+		else
+			kvm_mmu_free_page(kvm, sp);
 	} while (!list_empty(invalid_list));
 }
 
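
Taken together, the two phases now work like this: prepare queues every
zappable page on invalid_list, including unsync pages that still carry
references, and commit unlinks each entry but frees only the ones nobody
references anymore; a still-referenced page is presumably released once
its last user drops it. A compact, self-contained userspace model of
that flow (the list helpers, struct page and the prepare/commit pair are
simplified stand-ins, not the kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->prev = e->next = e;
}

static void list_add(struct list_head *e, struct list_head *h)
{
	e->next = h->next;
	e->prev = h;
	h->next->prev = e;
	h->next = e;
}

static void list_move(struct list_head *e, struct list_head *h)
{
	list_del(e);
	list_add(e, h);
}

static bool list_empty(const struct list_head *h) { return h->next == h; }

struct page {
	struct list_head link;	/* must stay the first member (see cast) */
	int active_count;	/* live references, like sp->active_count */
	bool unsync;
	bool invalid;
};

/* Prepare: queue the page; unsync pages may still be referenced. */
static int prepare_zap(struct page *p, struct list_head *invalid_list)
{
	int ret = 0;

	if (!p->active_count || p->unsync) {
		ret++;			/* count self */
		list_move(&p->link, invalid_list);
	}
	p->unsync = false;		/* ~ kvm_unlink_unsync_page() */
	p->invalid = true;
	return ret;
}

/* Commit: unlink everything, free only what nobody references. */
static void commit_zap(struct list_head *invalid_list)
{
	while (!list_empty(invalid_list)) {
		struct page *p = (struct page *)invalid_list->next;

		assert(p->invalid);
		list_del(&p->link);
		if (!p->active_count)
			free(p);
		/* else: freed later, when the last reference goes away */
	}
}

int main(void)
{
	struct list_head invalid_list;
	struct page *busy = calloc(1, sizeof(*busy));
	struct page *idle = calloc(1, sizeof(*idle));
	int zapped;

	list_init(&invalid_list);
	list_init(&busy->link);
	list_init(&idle->link);
	busy->active_count = 1;
	busy->unsync = true;	/* zappable despite the live reference */

	zapped = prepare_zap(busy, &invalid_list);
	zapped += prepare_zap(idle, &invalid_list);
	commit_zap(&invalid_list);

	printf("zapped %d pages\n", zapped);	/* prints: zapped 2 pages */
	free(busy);		/* the last user drops its reference */
	return 0;
}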