@@ -2731,7 +2731,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 restart:
         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
-                if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
+                if (sp->gfn != gfn || sp->role.direct || sp->role.invalid ||
+                    sp->unsync)
                         continue;
                 pte_size = sp->role.cr4_pae ? 8 : 4;
                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
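The hunk above extends the skip condition in kvm_mmu_pte_write() to unsync shadow pages alongside direct and invalid ones. An unsync page is not write-protected and is reloaded from the guest table when it is next synced, so the write-flood and misalignment heuristics below have nothing to fix up or zap for it. (For reference, the misaligned expression is nonzero exactly when the first and last byte of the write land in different pte-sized slots, i.e. the write straddles or misaligns a guest pte.)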
@@ -475,10 +475,15 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                 level = iterator.level;
                 sptep = iterator.sptep;
 
+                sp = page_header(__pa(sptep));
                 if (is_last_spte(*sptep, level)) {
                         int shift;
 
-                        sp = page_header(__pa(sptep));
+                        if (!sp->unsync)
+                                break;
+
+                        WARN_ON(level != PT_PAGE_TABLE_LEVEL);
+
                         shift = PAGE_SHIFT -
                                 (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
                         gfn = sp->gfn;
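In FNAME(invlpg), the page_header() lookup now happens on every iteration of the shadow walk, since the non-leaf check added in the next hunk needs sp as well. At the leaf, only an unsync page is worth updating: a synced last-level page is write-protected, so its guest ptes cannot have changed behind KVM's back and invlpg has nothing to refresh. The WARN_ON documents the existing invariant that shadow pages are only marked unsync at PT_PAGE_TABLE_LEVEL.

The shift computed here feeds the calculation of the guest pte's physical address. As a minimal self-contained sketch of that arithmetic, here is a userspace rendering of the 32-bit (PTTYPE 32) case, where one 1024-entry guest page table is shadowed by two 512-entry shadow tables selected by sp->role.quadrant. The constants mirror the kernel's values; the exact pte_gpa expression is an assumption reconstructed from the code elided between these hunks:

/*
 * Hypothetical userspace sketch: how the invlpg path locates the
 * guest pte backing an unsync spte (PTTYPE 32 constants assumed).
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT          12
#define PT_PAGE_TABLE_LEVEL 1
#define PT_LEVEL_BITS       10      /* 32-bit guest: 1024 entries per table */
#define PT64_LEVEL_BITS     9       /* shadow tables: 512 entries */

int main(void)
{
        uint64_t gfn = 0x1234;      /* guest frame of the guest page table */
        unsigned int quadrant = 1;  /* which half the shadow table covers */
        unsigned int index = 7;     /* sptep - sp->spt inside the shadow table */
        unsigned int level = PT_PAGE_TABLE_LEVEL;

        /*
         * Same arithmetic as the invlpg path: at level 1 this yields
         * shift = 11, so quadrant selects a 2KB half of the 4KB guest table.
         */
        unsigned int shift = PAGE_SHIFT -
                (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
        uint64_t pte_gpa = (gfn << PAGE_SHIFT) +
                ((uint64_t)quadrant << shift) +
                index * sizeof(uint32_t);   /* 4-byte guest ptes */

        printf("guest pte gpa = 0x%llx\n", (unsigned long long)pte_gpa);
        return 0;
}

For gfn 0x1234, quadrant 1, index 7 this prints 0x123481c: the page base 0x1234000, plus 0x800 for the second half of the guest table, plus 7 * 4 bytes into it.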
@@ -496,7 +501,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                         break;
                 }
 
-                if (!is_shadow_present_pte(*sptep))
+                if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
                         break;
         }
 
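Here the moved page_header() lookup pays off: sp->unsync_children counts the unsync pages reachable below a shadow page, so the walk can stop as soon as it hits a present non-leaf entry with no unsync children, since no unsync leaf can lie under it for this gva.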
@@ -523,8 +528,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                 kvm_mmu_free_page(vcpu->kvm, sp);
                 goto unlock_exit;
         }
-        if (vcpu->kvm->arch.invlpg_counter == invlpg_counter &&
-            sp->role.level == PT_PAGE_TABLE_LEVEL) {
+        if (vcpu->kvm->arch.invlpg_counter == invlpg_counter) {
                 ++vcpu->kvm->stat.mmu_pte_updated;
                 FNAME(update_pte)(vcpu, sp, sptep, &gentry);
         }
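With the leaf path bailing out for synced pages and the WARN_ON asserting that unsync pages only exist at PT_PAGE_TABLE_LEVEL, any sp reaching this prefetch is necessarily a last-level page, so the explicit role.level test became redundant and is dropped. The invlpg_counter comparison still protects the update: gentry was read before mmu_lock was retaken, and if another invlpg has bumped the counter in the meantime the cached value may be stale, so the prefetch is skipped.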