@@ -862,6 +862,28 @@ static void free_context_table(struct intel_iommu *iommu)
spin_unlock_irqrestore(&iommu->lock, flags);
}
+static inline void set_pte_mktme_keyid(unsigned long phys_pfn,
+ phys_addr_t *pteval)
+{
+#ifdef CONFIG_X86_INTEL_MKTME
+ unsigned long keyid;
+
+ if (!pfn_valid(phys_pfn))
+ return;
+
+ keyid = page_keyid(pfn_to_page(phys_pfn));
+
+ /*
+ * When MKTME is enabled, encode the keyid in the PTE so that
+ * DMA remapping carries it through the IOVA-to-physical
+ * translation. This applies to both user- and kernel-allocated
+ * DMA memory.
+ */
+ *pteval &= ~mktme_keyid_mask;
+ *pteval |= keyid << mktme_keyid_shift;
+#endif
+}
+
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
unsigned long pfn, int *target_level)
{
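
For reference, a minimal user-space sketch of the bit packing set_pte_mktme_keyid() performs. The 6-bit keyid field at bit 40 is an assumption made purely for illustration; in the kernel the real geometry comes from MKTME enumeration via mktme_keyid_mask and mktme_keyid_shift.

/* Sketch only, not kernel code: assumes a 6-bit keyid at bit 40. */
#include <stdint.h>
#include <stdio.h>

#define KEYID_SHIFT 40
#define KEYID_MASK  (0x3fULL << KEYID_SHIFT)

static uint64_t pack_keyid(uint64_t pteval, uint64_t keyid)
{
        pteval &= ~KEYID_MASK;          /* drop any stale keyid bits */
        pteval |= keyid << KEYID_SHIFT; /* install the page's keyid */
        return pteval;
}

int main(void)
{
        uint64_t pte = 0x1234000ULL | 0x3; /* page address | R/W bits */

        printf("pte with keyid 5: %#jx\n", (uintmax_t)pack_keyid(pte, 5));
        return 0;
}
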
@@ -888,7 +910,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
break;
if (!dma_pte_present(pte)) {
- uint64_t pteval;
+ phys_addr_t pteval;
tmp_page = alloc_pgtable_page(domain->nid);
@@ -896,7 +918,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
return NULL;
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
- pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+ pteval = ((phys_addr_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+ set_pte_mktme_keyid(virt_to_dma_pfn(tmp_page), &pteval);
if (cmpxchg64(&pte->val, 0ULL, pteval))
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
@@ -2289,6 +2312,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
}
}
+ set_pte_mktme_keyid(phys_pfn, &pteval);
+
/* We don't need lock here, nobody else
* touches the iova range
*/
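
Worth noting why the call is placed inside __domain_mapping()'s mapping loop rather than hoisted out: different physical pages can carry different keyids, so the keyid has to be refreshed for each physical range being mapped. A toy model of that, with hypothetical keyid assignments per pfn:

/* Sketch only: per-pfn keyids, as page_keyid() would report per page. */
#include <stdint.h>
#include <stdio.h>

#define KEYID_SHIFT   40
#define KEYID_MASK    (0x3fULL << KEYID_SHIFT)
#define PAGE_SHIFT_4K 12

static const uint64_t keyid_of_pfn[] = { 0, 5, 5, 2 }; /* hypothetical */

int main(void)
{
        for (uint64_t pfn = 0; pfn < 4; pfn++) {
                uint64_t pteval = (pfn << PAGE_SHIFT_4K) | 0x3; /* R/W */

                /* Same packing as set_pte_mktme_keyid(), per page. */
                pteval &= ~KEYID_MASK;
                pteval |= keyid_of_pfn[pfn] << KEYID_SHIFT;
                printf("pfn %ju -> pte %#jx\n", (uintmax_t)pfn,
                       (uintmax_t)pteval);
        }
        return 0;
}
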
@@ -34,6 +34,8 @@
#include <asm/cacheflush.h>
#include <asm/iommu.h>
+#include <asm/page.h>
+
/*
* VT-d hardware uses 4KiB page size regardless of host page size.
@@ -603,7 +605,12 @@ static inline void dma_clear_pte(struct dma_pte *pte)
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
- return pte->val & VTD_PAGE_MASK;
+ u64 addr = pte->val & VTD_PAGE_MASK;
+#ifdef CONFIG_X86_INTEL_MKTME
+ /* Keyid bits live in the high address bits; they are not address. */
+ addr &= ~mktme_keyid_mask;
+#endif
+ return addr;
#else
/* Must have a full atomic 64-bit read */
return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
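
And the read side: dma_pte_addr() has to strip the keyid bits explicitly, because they fall within the address bits that VTD_PAGE_MASK preserves and would otherwise corrupt the physical address used for table walks. A round-trip check under the same assumed 6-bit/bit-40 layout:

/* Sketch only: dma_pte_addr()-style stripping, assumed bit layout. */
#include <assert.h>
#include <stdint.h>

#define KEYID_SHIFT  40
#define KEYID_MASK   (0x3fULL << KEYID_SHIFT)
#define PAGE_MASK_4K (~0xfffULL)

static uint64_t pte_addr(uint64_t pteval)
{
        uint64_t addr = pteval & PAGE_MASK_4K; /* drop attribute bits */

        addr &= ~KEYID_MASK;                   /* drop keyid bits */
        return addr;
}

int main(void)
{
        uint64_t addr = 0x1234000ULL;
        uint64_t pte = addr | (3ULL << KEYID_SHIFT) | 0x3; /* keyid 3, R/W */

        /* The keyid must not leak into addresses used for table walks. */
        assert(pte_addr(pte) == addr);
        return 0;
}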