@@ -461,6 +461,18 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
}
}
+#if PTTYPE == PTTYPE_EPT
+static void FNAME(link_shadow_page)(u64 *sptep, struct kvm_mmu_page *sp) /* EPT-format replacement for link_shadow_page(): point non-leaf spte at sp's shadow table */
+{
+ u64 spte;
+
+ spte = __pa(sp->spt) | VMX_EPT_READABLE_MASK | VMX_EPT_WRITABLE_MASK |
+ VMX_EPT_EXECUTABLE_MASK; /* fully-permissive (RWX) non-leaf entry; presumably EPT entries use these permission bits in place of the x86 present/accessed bits -- TODO confirm against SDM */
+
+ mmu_spte_set(sptep, spte); /* install the encoded entry at sptep */
+}
+#endif
+
/*
* Fetch a shadow pte for a specific level in the paging hierarchy.
* If the guest tries to write a write-protected page, we need to
@@ -513,7 +525,11 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 goto out_gpte_changed;
 if (sp)
+#if PTTYPE == PTTYPE_EPT
+ FNAME(link_shadow_page)(it.sptep, sp); /* NOTE(review): this #if/#else dispatch is duplicated at the second call site below; consider folding the PTTYPE choice into one helper */
+#else
 link_shadow_page(it.sptep, sp);
+#endif
 }
 for (;
@@ -533,7 +549,11 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
 true, direct_access, it.sptep);
+#if PTTYPE == PTTYPE_EPT
+ FNAME(link_shadow_page)(it.sptep, sp); /* NOTE(review): duplicated #if/#else dispatch -- same pattern as the earlier call site; a single PTTYPE-aware helper would avoid the repetition */
+#else
 link_shadow_page(it.sptep, sp);
+#endif
 }
 clear_sp_write_flooding_count(it.sptep);