diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -806,7 +806,7 @@ struct kvm_x86_ops {
 	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
-	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn);
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
 	bool (*invpcid_supported)(void);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2496,8 +2496,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (level > PT_PAGE_TABLE_LEVEL)
 		spte |= PT_PAGE_SIZE_MASK;
 	if (tdp_enabled)
-		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-			kvm_is_reserved_pfn(pfn));
+		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn);
 
 	if (host_writable)
 		spte |= SPTE_HOST_WRITEABLE;
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -259,8 +259,7 @@ static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
 	gfn_t start, end;
 	int index;
 
-	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
-	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))
+	if (msr == MSR_IA32_CR_PAT || !tdp_enabled)
 		return;
 
 	if (!mtrr_state->mtrr_enabled && msr != MSR_MTRRdefType)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4075,7 +4075,7 @@ static bool svm_cpu_has_accelerated_tpr(void)
 	return false;
 }
 
-static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	return 0;
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8626,31 +8626,11 @@ static int get_ept_level(void)
 	return VMX_EPT_DEFAULT_GAW + 1;
 }
 
-static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
-{
-	u64 ret;
-
-	/* For VT-d and EPT combination
-	 * 1. MMIO: always map as UC
-	 * 2. EPT with VT-d:
-	 *   a. VT-d without snooping control feature: can't guarantee the
-	 *	result, try to trust guest.
-	 *   b. VT-d with snooping control feature: snooping control feature of
-	 *	VT-d engine can guarantee the cache correctness. Just set it
-	 *	to WB to keep consistent with host. So the same as item 3.
-	 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
-	 *    consistent with host MTRR
-	 */
-	if (is_mmio)
-		ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
-	else if (kvm_arch_has_noncoherent_dma(vcpu->kvm))
-		ret = kvm_mtrr_get_guest_memory_type(vcpu, gfn) <<
-		      VMX_EPT_MT_EPTE_SHIFT;
-	else
-		ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
-			| VMX_EPT_IPAT_BIT;
+static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u8 type = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
 
-	return ret;
+	return type << VMX_EPT_MT_EPTE_SHIFT;
 }
 
 static int vmx_get_lpage_level(void)
Currently, guest MTRR is completely prohibited if cache snooping is
supported on the IOMMU (!noncoherent_dma), and the host does the
emulation based on host-side knowledge. However, the host is not in a
good position to know the guest's intent; a good example is that a
passed-through VGA frame buffer is not always UC as the host expects.

This patchset enables full MTRR virtualization; currently it works only
on the Intel EPT architecture.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/mmu.c              |  3 +--
 arch/x86/kvm/mtrr.c             |  3 +--
 arch/x86/kvm/svm.c              |  2 +-
 arch/x86/kvm/vmx.c              | 28 ++++------------------------
 5 files changed, 8 insertions(+), 30 deletions(-)
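Editor's note, not part of the commit: below is a minimal standalone C
sketch (not kernel code; only the constant values mirror the kernel's
definitions of VMX_EPT_MT_EPTE_SHIFT, VMX_EPT_IPAT_BIT and the MTRR
types) contrasting the EPT memory-type bits the old forced-WB path and
the new guest-MTRR path produce for a range the guest maps UC, such as
a passed-through frame buffer.

/*
 * Standalone sketch (not kernel code): EPT PTE bits 5:3 hold the
 * effective memory type; bit 6 (IPAT) tells the CPU to ignore the
 * guest PAT. Constant values mirror the kernel headers.
 */
#include <stdio.h>
#include <stdint.h>

#define VMX_EPT_MT_EPTE_SHIFT	3
#define VMX_EPT_IPAT_BIT	(1ull << 6)
#define MTRR_TYPE_UNCACHABLE	0
#define MTRR_TYPE_WRBACK	6

int main(void)
{
	/* Old path (snooping IOMMU): force WB and set IPAT, overriding
	 * both the guest MTRRs and the guest PAT. */
	uint64_t old_bits = ((uint64_t)MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
			    | VMX_EPT_IPAT_BIT;

	/* New path: honor the type the guest programmed in its MTRRs
	 * (UC here) and leave IPAT clear so the guest PAT still applies. */
	uint64_t new_bits = (uint64_t)MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;

	printf("old EPT memtype bits: 0x%llx\n", (unsigned long long)old_bits);
	printf("new EPT memtype bits: 0x%llx\n", (unsigned long long)new_bits);
	return 0;
}

Leaving IPAT clear in the new path means the guest PAT is still
combined with the MTRR-derived type, as on bare metal.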