@@ -2719,7 +2719,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 restart:
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
-		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
+		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid ||
+		    sp->unsync)
 			continue;
 		pte_size = sp->role.cr4_pae ? 8 : 4;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
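The new sp->unsync test makes the pte-write detection loop skip unsync shadow pages altogether: an unsync page is by definition not write-protected, so a guest write to it is presumably folded in at the next sync rather than needing any zapping or flooding bookkeeping here. The unchanged misaligned line above deserves a gloss as well: the XOR is nonzero exactly when the first and last byte of the write land in different pte-sized slots. A minimal standalone sketch of that trick (hypothetical helper name, not kernel code):

#include <stdio.h>

/* Nonzero iff a write of `bytes` bytes at page offset `offset` touches
 * more than one pte-sized slot; same expression as in kvm_mmu_pte_write(). */
static int spans_multiple_ptes(unsigned offset, unsigned bytes,
			       unsigned pte_size)
{
	return ((offset ^ (offset + bytes - 1)) & ~(pte_size - 1)) != 0;
}

int main(void)
{
	printf("%d\n", spans_multiple_ptes(0, 8, 8));	/* 0: one 8-byte pte */
	printf("%d\n", spans_multiple_ptes(4, 8, 8));	/* 1: straddles two  */
	printf("%d\n", spans_multiple_ptes(16, 4, 8));	/* 0: within one pte */
	return 0;
}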
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -474,10 +474,14 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		level = iterator.level;
 		sptep = iterator.sptep;
 
+		sp = page_header(__pa(sptep));
 		if (is_last_spte(*sptep, level)) {
 			int shift;
 
-			sp = page_header(__pa(sptep));
+			if (!sp->unsync)
+				break;
+
+			WARN_ON(level != PT_PAGE_TABLE_LEVEL);
 			shift = PAGE_SHIFT -
 				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
 			gfn = sp->gfn;
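Hoisting sp = page_header(__pa(sptep)) out of the leaf-only branch lets every iteration of the walk look at the shadow page, which the next hunk needs for its unsync_children test. At the leaf, a page that is not unsync should have nothing stale to drop, since its guest page table is write-protected and cannot change without faulting, so the walk can stop early without touching the spte. The WARN_ON records the invariant that unsync pages are always last-level (4k) page tables, which the final hunk below leans on. The shift arithmetic in the context lines exists for 32-bit guests, where a guest page table packs 1024 4-byte entries but a shadow page holds only 512, so each guest table is shadowed in two quadrants selected by sp->role.quadrant. A throwaway illustration of the numbers (constants copied from the 32-bit instantiation; illustrative, not kernel code):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PT_LEVEL_BITS	10	/* 32-bit guest: 1024 ptes per page */
#define PT64_LEVEL_BITS	9	/* shadow page:   512 sptes per page */

int main(void)
{
	int level = 1;	/* PT_PAGE_TABLE_LEVEL */
	int shift = PAGE_SHIFT - (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;

	/* role.quadrant picks which half of the guest page table this
	 * shadow page maps; the byte offset into the guest page is the
	 * quadrant shifted up, i.e. 0 or 2048 here. */
	for (int quadrant = 0; quadrant < 2; quadrant++)
		printf("quadrant %d -> offset %d\n", quadrant,
		       quadrant << shift);
	return 0;
}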
@@ -494,7 +498,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 			break;
 		}
 
-		if (!is_shadow_present_pte(*sptep))
+		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
 			break;
 	}
 
@@ -515,8 +519,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 
 	mmu_guess_page_from_pte_write(vcpu, gfn_to_gpa(gfn) + offset, gentry);
 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) == invlpg_counter &&
-	    sp->role.level == PT_PAGE_TABLE_LEVEL) {
+	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) == invlpg_counter) {
 		++vcpu->kvm->stat.mmu_pte_updated;
 		FNAME(update_pte)(vcpu, sp, sptep, &gentry);
 	}
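With the WARN_ON added earlier asserting that the spte being refreshed always belongs to a PT_PAGE_TABLE_LEVEL page, the level recheck under the reacquired mmu_lock is redundant and can go. What remains is the optimistic-concurrency guard: the invlpg_counter is sampled, the lock is dropped to read the guest pte, and the update is committed only if no other invlpg ran in between. The bare shape of that pattern, outside any kernel context (illustrative names, not kernel APIs):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int invlpg_counter;	/* bumped once per invlpg */

/* Commit work prepared while the lock was dropped only if no invlpg
 * ran in the meantime; otherwise discard it, as FNAME(invlpg) does. */
static bool commit_if_unchanged(int snapshot)
{
	/* the real code re-takes kvm->mmu_lock before this check */
	if (atomic_load(&invlpg_counter) != snapshot)
		return false;	/* raced with an invlpg: throw it away */
	/* ... install the new pte here ... */
	return true;
}

int main(void)
{
	int snap = atomic_load(&invlpg_counter);
	atomic_fetch_add(&invlpg_counter, 1);	/* simulate a racing invlpg */
	printf("committed: %d\n", commit_if_unchanged(snap));	/* prints 0 */
	return 0;
}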