Use what are effectively EPT's RWX bits to detect present SPTEs instead
of simply looking for a non-zero value.  This will allow using a non-zero
initial value for SPTEs as well as using not-present SPTEs to track
metadata for zapped private SPTEs.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/mmu/mmu.c         | 9 +++++++--
 arch/x86/kvm/mmu/paging_tmpl.h | 3 ++-
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -625,9 +625,14 @@ static int is_nx(struct kvm_vcpu *vcpu)
 	return vcpu->arch.efer & EFER_NX;
 }
 
-static int is_shadow_present_pte(u64 pte)
+static inline bool __is_shadow_present_pte(u64 pte)
 {
-	return (pte != 0) && !is_mmio_spte(pte);
+	return !!(pte & 0x7);
+}
+
+static bool is_shadow_present_pte(u64 pte)
+{
+	return __is_shadow_present_pte(pte) && !is_mmio_spte(pte);
 }
 
 static int is_large_pte(u64 pte)
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -1024,7 +1024,8 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		gpa_t pte_gpa;
 		gfn_t gfn;
 
-		if (!sp->spt[i])
+		if (!__is_shadow_present_pte(sp->spt[i]) &&
+		    !is_mmio_spte(sp->spt[i]))
 			continue;
 
 		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
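As an illustrative aside, not part of the patch: the new check works because
EPT's Read/Write/eXecute permission bits live in bits 2:0, so any present SPTE
has at least one of them set, while a special not-present value can stash
metadata in the high bits as long as bits 2:0 stay clear.  Below is a minimal
standalone userspace sketch of that idea; SPTE_EPT_RWX_MASK and
HYPOTHETICAL_INIT_SPTE are invented names for illustration, not KVM
identifiers.

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* EPT's R/W/X permission bits occupy bits 2:0 of a PTE. */
	#define SPTE_EPT_RWX_MASK	0x7ull

	/*
	 * Hypothetical non-zero "not-present" value that keeps bits 2:0
	 * clear, e.g. to track metadata for a zapped private SPTE.
	 */
	#define HYPOTHETICAL_INIT_SPTE	(1ull << 62)

	/* Mirrors __is_shadow_present_pte(): present iff any RWX bit is set. */
	static bool sketch_is_shadow_present_pte(uint64_t pte)
	{
		return !!(pte & SPTE_EPT_RWX_MASK);
	}

	int main(void)
	{
		/* A zeroed SPTE is not present, same as before. */
		assert(!sketch_is_shadow_present_pte(0));

		/* Non-zero value with RWX clear: still not present. */
		assert(!sketch_is_shadow_present_pte(HYPOTHETICAL_INIT_SPTE));

		/* Any SPTE with at least one of R/W/X set is present. */
		assert(sketch_is_shadow_present_pte(0x5));
		return 0;
	}

Under the old "pte != 0" check, HYPOTHETICAL_INIT_SPTE would be misread as
present; keying presence off the RWX bits is what makes a non-zero initial
value for SPTEs safe.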