--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2783,6 +2783,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
break;
}

+ if (is_rsvd_bits_set(vcpu, gentry, PT_PAGE_TABLE_LEVEL))
+ gentry = 0;
+
mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
spin_lock(&vcpu->kvm->mmu_lock);
if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
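
For reference, is_rsvd_bits_set() checks the written gpte against the reserved-bit mask for the vcpu's current paging mode; a gpte with any reserved bit set can never map anything, so it is safer to drop it (gentry = 0) than to let mmu_pte_write_new_pte() shadow it below. A minimal userspace sketch of the underlying test, assuming a fixed 48-bit MAXPHYADDR and only the 4KiB-level mask (the kernel precomputes per-level, per-mode masks):

/* sketch only, not kernel code; MAXPHYADDR and names are assumptions */
#include <stdint.h>
#include <stdio.h>

#define MAXPHYADDR 48	/* assumed guest physical address width */

/* In a 64-bit 4KiB-level PTE, physical-address bits [51:MAXPHYADDR]
 * must be zero; setting any of them makes the entry invalid. */
static uint64_t rsvd_mask(void)
{
	return ((1ULL << (52 - MAXPHYADDR)) - 1) << MAXPHYADDR;
}

static int pte_has_rsvd_bits(uint64_t pte)
{
	return (pte & rsvd_mask()) != 0;
}

int main(void)
{
	uint64_t good = 0x0000000012345063ULL;	/* P|RW|A|D, address ok */
	uint64_t bad  = good | (1ULL << 51);	/* reserved bit set */

	/* mirrors the patch: a reserved-bit gpte is treated as if
	 * nothing valid had been written */
	printf("good=%d bad=%d\n",
	       pte_has_rsvd_bits(good), pte_has_rsvd_bits(bad));
	return 0;
}
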
@@ -2851,6 +2854,11 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
while (npte--) {
entry = *spte;
mmu_pte_write_zap_pte(vcpu, sp, spte);
+
+ if (!!is_pae(vcpu) != sp->role.cr4_pae ||
+ is_nx(vcpu) != sp->role.nxe)
+ continue;
+
if (gentry)
mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
if (!remote_flush && need_remote_flush(entry, *spte))
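
Two details in this hunk deserve a note. First, gentry was decoded under the vcpu's current paging mode, so it may only be written into shadow pages whose role matches that mode; for pages built under a different PAE/NX configuration the patch just zaps the entry and moves on. Second, the !! on is_pae(): if a predicate helper returns the raw masked register bit rather than 0/1, comparing it directly against a one-bit role bitfield always fails. A standalone sketch of that pitfall (the struct below is illustrative, not KVM's union kvm_mmu_page_role):

/* sketch: why !!is_pae() is needed before comparing with a bitfield */
#include <stdio.h>

#define X86_CR4_PAE (1UL << 5)

struct role { unsigned cr4_pae:1; unsigned nxe:1; };

static unsigned long cr4 = X86_CR4_PAE;

static int is_pae(void)
{
	return cr4 & X86_CR4_PAE;	/* returns 0x20, not 1 */
}

int main(void)
{
	struct role r = { .cr4_pae = 1 };

	/* raw: 0x20 != 1 wrongly reports a mismatch */
	printf("raw:        %s\n",
	       is_pae() != r.cr4_pae ? "mismatch" : "match");
	/* normalized: !!0x20 == 1 correctly matches */
	printf("normalized: %s\n",
	       !!is_pae() != r.cr4_pae ? "mismatch" : "match");
	return 0;
}
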
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -640,8 +640,9 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
return -EINVAL;

gfn = gpte_to_gfn(gpte);
- if (gfn != sp->gfns[i] ||
- !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) {
+ if (is_rsvd_bits_set(vcpu, gpte, PT_PAGE_TABLE_LEVEL) ||
+ gfn != sp->gfns[i] || !is_present_gpte(gpte) ||
+ !(gpte & PT_ACCESSED_MASK)) {
u64 nonpresent;

if (is_present_gpte(gpte) || !clear_unsync)
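
The ordering of the added check matters: a reserved bit such as PTE bit 63 (NX) set while EFER.NX is clear leaves the gfn and the present/accessed bits intact, so the old condition would happily re-sync a mapping the hardware would actually fault on. A compact sketch of the post-patch predicate, with invented constants and helper names (KVM's real gpte_to_gfn() and masks differ):

/* sketch: validate a gpte before re-syncing a shadow entry */
#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK  (1ULL << 0)
#define PT_ACCESSED_MASK (1ULL << 5)
/* assumed reserved bits: [51:48] above a 48-bit MAXPHYADDR,
 * plus NX (bit 63) while EFER.NX is clear */
#define RSVD_MASK        ((0xfULL << 48) | (1ULL << 63))

static uint64_t gpte_to_gfn(uint64_t gpte)
{
	return (gpte & 0x000ffffffffff000ULL) >> 12;
}

/* Returns 1 if the shadow pte may be rebuilt from this gpte, 0 if it
 * must be made nonpresent; the reserved-bit test runs first, before
 * the gfn or the flag bits are trusted. */
static int gpte_ok(uint64_t gpte, uint64_t expected_gfn)
{
	if (gpte & RSVD_MASK)
		return 0;
	if (gpte_to_gfn(gpte) != expected_gfn)
		return 0;
	if (!(gpte & PT_PRESENT_MASK) || !(gpte & PT_ACCESSED_MASK))
		return 0;
	return 1;
}

int main(void)
{
	uint64_t gpte = (0x1234ULL << 12) | PT_PRESENT_MASK | PT_ACCESSED_MASK;

	printf("clean: %d\n", gpte_ok(gpte, 0x1234));			/* 1 */
	/* NX set with EFER.NX clear: gfn and flags still look fine,
	 * only the reserved-bit test rejects it */
	printf("rsvd:  %d\n", gpte_ok(gpte | (1ULL << 63), 0x1234));	/* 0 */
	return 0;
}
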