@@ -809,6 +809,7 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
+void kvm_set_mmio_spte_mask(void);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
@@ -295,6 +295,31 @@ static bool check_mmio_spte(struct kvm *kvm, u64 spte)
return likely(kvm_gen == spte_gen);
}
+/*
+ * Set the reserved bits and the present bit of a paging-structure
+ * entry to generate a page fault with PFERR.RSVD = 1.
+ */
+void kvm_set_mmio_spte_mask(void)
+{
+ u64 mask;
+ int maxphyaddr = boot_cpu_data.x86_phys_bits;
+
+ /* Mask the reserved physical address bits. */
+ mask = rsvd_bits(maxphyaddr, MMIO_SPTE_GEN_HIGH_SHIFT - 1);
+
+ /* Magic bits are always reserved to identify an mmio spte;
+ * on 32-bit hosts bit 62 is the reserved bit.
+ */
+ mask |= 0x3ull << 62;
+
+ /* Set the present bit to enable the mmio page fault. */
+ if (maxphyaddr < MMIO_SPTE_GEN_HIGH_SHIFT)
+ mask |= PT_PRESENT_MASK;
+
+ kvm_mmu_set_mmio_spte_mask(mask);
+}
+EXPORT_SYMBOL_GPL(kvm_set_mmio_spte_mask);
+
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
@@ -5596,36 +5596,6 @@ void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
-static void kvm_set_mmio_spte_mask(void)
-{
- u64 mask;
- int maxphyaddr = boot_cpu_data.x86_phys_bits;
-
- /*
- * Set the reserved bits and the present bit of an paging-structure
- * entry to generate page fault with PFER.RSV = 1.
- */
- /* Mask the reserved physical address bits. */
- mask = rsvd_bits(maxphyaddr, 51);
-
- /* Bit 62 is always reserved for 32bit host. */
- mask |= 0x3ull << 62;
-
- /* Set the present bit. */
- mask |= 1ull;
-
-#ifdef CONFIG_X86_64
- /*
- * If reserved bit is not supported, clear the present bit to disable
- * mmio page fault.
- */
- if (maxphyaddr == 52)
- mask &= ~1ull;
-#endif
-
- kvm_mmu_set_mmio_spte_mask(mask);
-}
-
#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
In the non-EPT case with 64-bit PAE page tables, maxphyaddr may be 52
bits as well, so we also need to disable the mmio page fault there.
Here we can check MMIO_SPTE_GEN_HIGH_SHIFT directly to determine
whether we should set the present bit, which also brings a little
cleanup.

Signed-off-by: Tiejun Chen <tiejun.chen@intel.com>
---
v2:
* Correct code comments
* Need to use "|=" to set the present bit

 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/mmu.c              | 25 +++++++++++++++++++++++++
 arch/x86/kvm/x86.c              | 30 ------------------------------
 3 files changed, 26 insertions(+), 30 deletions(-)
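
For reviewers, here is a minimal userspace sketch of the resulting mask
computation. It is only an illustration under stated assumptions:
rsvd_bits() is reimplemented to match the helper in arch/x86/kvm/mmu.h,
and MMIO_SPTE_GEN_HIGH_SHIFT (52) and PT_PRESENT_MASK (bit 0) are
hard-coded to the values assumed from mmu.c:

#include <stdint.h>
#include <stdio.h>

/* Assumed values, mirroring arch/x86/kvm/mmu.c. */
#define MMIO_SPTE_GEN_HIGH_SHIFT	52
#define PT_PRESENT_MASK			1ULL

/* Same formula as rsvd_bits() in arch/x86/kvm/mmu.h. */
static uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

/* The mask computation from the new kvm_set_mmio_spte_mask(). */
static uint64_t mmio_spte_mask(int maxphyaddr)
{
	/* Reserved physical address bits [maxphyaddr, 51]. */
	uint64_t mask = rsvd_bits(maxphyaddr, MMIO_SPTE_GEN_HIGH_SHIFT - 1);

	/* Magic bits 62:63 to tag mmio sptes. */
	mask |= 0x3ULL << 62;

	/* Present bit only if some physical address bits are reserved. */
	if (maxphyaddr < MMIO_SPTE_GEN_HIGH_SHIFT)
		mask |= PT_PRESENT_MASK;

	return mask;
}

int main(void)
{
	/* Typical case: bits 46..51 reserved plus 62:63 plus present. */
	printf("maxphyaddr 46: 0x%016llx\n",
	       (unsigned long long)mmio_spte_mask(46));

	/*
	 * maxphyaddr == 52: rsvd_bits(52, 51) is 0, so the present bit
	 * stays clear and the mmio page fault is disabled on every
	 * host, not only under CONFIG_X86_64 as before.
	 */
	printf("maxphyaddr 52: 0x%016llx\n",
	       (unsigned long long)mmio_spte_mask(52));
	return 0;
}

This prints 0xc00fc00000000001 for maxphyaddr 46 and 0xc000000000000000
for maxphyaddr 52, i.e. with maxphyaddr 52 the mask no longer includes
the present bit on 32-bit hosts either, which is the point of the fix.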