@@ -2299,10 +2299,11 @@ static void paging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 	for_each_shadow_entry(vcpu, gva, iterator) {
 		level = iterator.level;
 		sptep = iterator.sptep;
+		sp = page_header(__pa(sptep));
 		if (is_last_spte(*sptep, level)) {
-
-			sp = page_header(__pa(sptep));
+			if (!sp->unsync)
+				break;
 			if (!sp->role.cr4_pae)
 				offset = sp->role.quadrant << PT64_LEVEL_BITS;;
@@ -2320,7 +2321,7 @@ static void paging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 			break;
 		}
-		if (!is_shadow_present_pte(*sptep))
+		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
 			break;
 	}
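
Taken together, the two hunks above make the invlpg walk compute the shadow page header once per iteration and bail out as soon as nothing stale can remain: a synced leaf page is still write-protected, so its spte cannot disagree with the guest pte, and an intermediate page with no unsync children cannot have a stale leaf below it. A condensed sketch of how the loop reads after the patch (variable declarations and the pte_gpa bookkeeping are assumed from the surrounding function and elided here):

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		/* one header lookup, shared by both early-exit checks */
		sp = page_header(__pa(sptep));

		if (is_last_spte(*sptep, level)) {
			/* synced pages are write-protected: nothing stale */
			if (!sp->unsync)
				break;

			/* ... record pte_gpa and zap the spte as before ... */
			break;
		}

		/* no unsync children below, so no unsync leaf to reach */
		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}
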
@@ -2744,7 +2745,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 restart:
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
-		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
+		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid ||
+		    sp->unsync)
 			continue;
 		pte_size = sp->role.cr4_pae ? 8 : 4;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
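
The kvm_mmu_pte_write() hunk extends the skip set in the same spirit: an unsync page is not write-protected and its shadow copy is brought back in sync through the sync path, so the pte-update and write-flood handling in this loop has nothing to do for it. A purely illustrative helper (sp_skip_pte_write() is not a function in mmu.c) that captures the widened skip condition:

/* hypothetical predicate, illustration only: pages this write path ignores */
static bool sp_skip_pte_write(struct kvm_mmu_page *sp, gfn_t gfn)
{
	return sp->gfn != gfn ||	/* shadows a different guest frame */
	       sp->role.direct ||	/* no guest page table behind it */
	       sp->role.invalid ||	/* already being torn down */
	       sp->unsync;		/* not write-protected; resynced later */
}
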