@@ -2697,7 +2697,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 restart:
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
-		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
+		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid ||
+		    sp->unsync)
 			continue;
 		pte_size = sp->role.cr4_pae ? 8 : 4;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
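
The misaligned test above is worth unpacking: a write to guest bytes [offset, offset + bytes - 1] touches more than one pte-sized slot exactly when the first and last byte offsets differ in some bit above the low log2(pte_size) bits, which the XOR-and-mask detects without any division. A minimal userspace sketch, with made-up sample values (not kernel code):

#include <stdio.h>

/*
 * Sketch of the "misaligned" test: nonzero iff the write
 * [offset, offset + bytes - 1] is not contained in a single
 * pte_size-byte, pte_size-aligned entry.
 */
static unsigned misaligned(unsigned offset, unsigned bytes, unsigned pte_size)
{
	return (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
}

int main(void)
{
	printf("%d\n", !!misaligned(0x10, 8, 8));	/* 0: one 8-byte pte  */
	printf("%d\n", !!misaligned(0x14, 8, 8));	/* 1: straddles a pte */
	printf("%d\n", !!misaligned(0x10, 16, 8));	/* 1: covers two ptes */
	return 0;
}
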
@@ -474,16 +474,16 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		level = iterator.level;
 		sptep = iterator.sptep;
 
+		sp = page_header(__pa(sptep));
 		if (is_last_spte(*sptep, level)) {
-			sp = page_header(__pa(sptep));
-
-			if (PTTYPE == 32) {
-				if (level == PT_DIRECTORY_LEVEL)
-					offset = PAGE_SHIFT - 4;
-				else
-					offset = PT64_LEVEL_BITS;
-				offset = sp->role.quadrant << offset;
-			}
+			if (!sp->unsync)
+				break;
+
+			WARN_ON(level != PT_PAGE_TABLE_LEVEL);
+
+			if (PTTYPE == 32)
+				offset = sp->role.quadrant << PT64_LEVEL_BITS;
+
 			gfn = sp->gfn;
 			offset = (sptep - sp->spt + offset) *
 				sizeof(pt_element_t);
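
With the unsync check in place, this point is only reached on a last-level shadow page (hence the WARN_ON), so the old PT_DIRECTORY_LEVEL shift is dead code: for PTTYPE == 32 the quadrant always selects one of the two 512-gpte halves of a 32-bit guest page table. A rough userspace sketch of the resulting gpte address calculation, assuming PT64_LEVEL_BITS is 9 and 4-byte gptes:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only.  Assumes PT64_LEVEL_BITS == 9 (512 sptes per shadow
 * page) and 4-byte gptes, so a 4K guest page table holds 1024 gptes
 * and each shadow page covers one 512-gpte "quadrant" of it.
 */
#define PT64_LEVEL_BITS	9
#define PAGE_SHIFT	12

static uint64_t gpte_gpa(uint64_t gfn, unsigned quadrant,
			 unsigned spte_index, unsigned pte_bytes)
{
	unsigned offset = quadrant << PT64_LEVEL_BITS;	/* base gpte index */

	return (gfn << PAGE_SHIFT) + (spte_index + offset) * pte_bytes;
}

int main(void)
{
	/* spte 3 in quadrant 1 shadows gpte 515, i.e. byte 2060. */
	printf("0x%jx\n", (uintmax_t)gpte_gpa(0x1000, 1, 3, 4));
	return 0;
}
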
@@ -498,7 +498,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 			break;
 		}
 
-		if (!is_shadow_present_pte(*sptep))
+		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
 			break;
 	}
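
The extra !sp->unsync_children condition lets the walk bail out early: the loop only keeps descending to find an unsync leaf, and unsync_children on an upper-level page counts the unsync pages reachable below it, so a zero count proves there is nothing left to fix up. A toy sketch of that early-exit rule, with fields loosely modeled on kvm_mmu_page and the counter assumed to be maintained elsewhere:

/*
 * "unsync" marks a leaf page table whose gptes may be stale;
 * "unsync_children" counts unsync pages below an upper-level page.
 */
struct shadow_page {
	int present;
	int unsync;
	unsigned int unsync_children;
	struct shadow_page *child;
};

static struct shadow_page *find_unsync_leaf(struct shadow_page *sp)
{
	for (; sp; sp = sp->child) {
		if (sp->unsync)
			return sp;
		/* Nothing mapped, or no unsync page below: stop early. */
		if (!sp->present || !sp->unsync_children)
			break;
	}
	return 0;
}

int main(void)
{
	struct shadow_page leaf = { .present = 1, .unsync = 1 };
	struct shadow_page dir = { .present = 1, .unsync_children = 1,
				   .child = &leaf };

	return find_unsync_leaf(&dir) != &leaf;	/* exit 0 on success */
}
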
@@ -520,8 +520,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	mmu_guess_page_from_pte_write(vcpu, gfn_to_gpa(gfn) + offset, gentry);
 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) == invlpg_counter &&
-	    sp->role.level == PT_PAGE_TABLE_LEVEL) {
+	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) == invlpg_counter) {
 		++vcpu->kvm->stat.mmu_pte_updated;
 		FNAME(update_pte)(vcpu, sp, sptep, &gentry);
 	}
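
invlpg_counter implements the usual optimistic scheme: sample the counter, drop mmu_lock to read the guest pte, then re-check under the lock and only apply the update if no concurrent invlpg bumped it in between. The removed role.level test becomes redundant because the update path is only reached for unsync pages, which the WARN_ON above pins to PT_PAGE_TABLE_LEVEL. A standalone sketch of the pattern with C11 atomics (names are illustrative, not the kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int invlpg_counter;

/* An invalidation bumps the generation counter under the lock. */
static void invlpg(void)
{
	pthread_mutex_lock(&mmu_lock);
	atomic_fetch_add(&invlpg_counter, 1);
	pthread_mutex_unlock(&mmu_lock);
}

static bool update_if_unchanged(void (*do_update)(void))
{
	int snap = atomic_load(&invlpg_counter);
	bool applied = false;

	/* ...slow path, e.g. copying the guest pte, runs unlocked... */

	pthread_mutex_lock(&mmu_lock);
	if (atomic_load(&invlpg_counter) == snap) {
		do_update();	/* our snapshot is still valid */
		applied = true;
	}
	pthread_mutex_unlock(&mmu_lock);
	return applied;
}

static void apply(void) { }

int main(void)
{
	return !update_if_unchanged(apply);	/* exit 0: update applied */
}
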