--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1521,12 +1521,16 @@ config X86_CPA_STATISTICS
config ARCH_HAS_MEM_ENCRYPT
def_bool y
+config X86_MEM_ENCRYPT_COMMON
+ select ARCH_HAS_FORCE_DMA_UNENCRYPTED
+ select DYNAMIC_PHYSICAL_MASK
+ def_bool n
+
config AMD_MEM_ENCRYPT
bool "AMD Secure Memory Encryption (SME) support"
depends on X86_64 && CPU_SUP_AMD
- select DYNAMIC_PHYSICAL_MASK
select ARCH_USE_MEMREMAP_PROT
- select ARCH_HAS_FORCE_DMA_UNENCRYPTED
+ select X86_MEM_ENCRYPT_COMMON
---help---
Say yes to enable support for the encryption of system memory.
This requires an AMD processor that supports Secure Memory
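Commentary, not part of the patch: X86_MEM_ENCRYPT_COMMON has no prompt, so it can only be enabled through "select". AMD_MEM_ENCRYPT selects it above, and the Intel MKTME option (CONFIG_X86_INTEL_MKTME, visible in the Makefile hunk below) would presumably need the same select so the common helpers get built for it too. A sketch of that, with the prompt text only illustrative:

config X86_INTEL_MKTME
	bool "Intel Multi-Key Total Memory Encryption (MKTME)"
	select X86_MEM_ENCRYPT_COMMON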
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -55,3 +55,4 @@ obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o
obj-$(CONFIG_X86_INTEL_MKTME) += mktme.o
+obj-$(CONFIG_X86_MEM_ENCRYPT_COMMON) += mem_encrypt_common.o
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -15,10 +15,6 @@
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/dma-mapping.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
@@ -352,32 +348,6 @@ bool sev_active(void)
}
EXPORT_SYMBOL(sev_active);
-/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
-bool force_dma_unencrypted(struct device *dev)
-{
- /*
- * For SEV, all DMA must be to unencrypted addresses.
- */
- if (sev_active())
- return true;
-
- /*
- * For SME, all DMA must be to unencrypted addresses if the
- * device does not support DMA to addresses that include the
- * encryption mask.
- */
- if (sme_active()) {
- u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
- u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
- dev->bus_dma_mask);
-
- if (dma_dev_mask <= dma_enc_mask)
- return true;
- }
-
- return false;
-}
-
/* Architecture __weak replacement functions */
void __init mem_encrypt_free_decrypted_mem(void)
{
new file mode 100644
--- /dev/null
+++ b/arch/x86/mm/mem_encrypt_common.c
@@ -0,0 +1,52 @@
+#include <linux/mm.h>
+#include <linux/mem_encrypt.h>
+#include <linux/dma-mapping.h>
+#include <asm/mktme.h>
+
+/*
+ * Encryption bits need to be set and cleared for both Intel MKTME and
+ * AMD SME when converting between DMA address and physical address.
+ */
+dma_addr_t __mem_encrypt_dma_set(dma_addr_t daddr, phys_addr_t paddr)
+{
+ unsigned long keyid;
+
+ if (sme_active())
+ return __sme_set(daddr);
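+
+	/* MKTME: encode the page's KeyID into the upper bits of the DMA address. */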
+ keyid = page_keyid(pfn_to_page(__phys_to_pfn(paddr)));
+
+ return (daddr & ~mktme_keyid_mask()) | (keyid << mktme_keyid_shift());
+}
+
+phys_addr_t __mem_encrypt_dma_clear(phys_addr_t paddr)
+{
+ if (sme_active())
+ return __sme_clr(paddr);
+
+ return paddr & ~mktme_keyid_mask();
+}
+
+/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+bool force_dma_unencrypted(struct device *dev)
+{
+ u64 dma_enc_mask, dma_dev_mask;
+
+ /*
+ * For SEV, all DMA must be to unencrypted addresses.
+ */
+ if (sev_active())
+ return true;
+
+ /*
+ * For SME and MKTME, all DMA must be to unencrypted addresses if the
+ * device does not support DMA to addresses that include the encryption
+ * mask.
+ */
+ if (!sme_active() && !mktme_enabled())
+ return false;
+
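+	/*
+	 * The device can DMA to encrypted memory only if its mask covers
+	 * every encryption bit (the SME C-bit and/or the MKTME KeyID bits).
+	 */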
+ dma_enc_mask = sme_me_mask | mktme_keyid_mask();
+ dma_dev_mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
+
+ return (dma_dev_mask & dma_enc_mask) != dma_enc_mask;
+}
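
Commentary, not part of the patch: the generic DMA-direct code is the consumer of these hooks. The sketch below is illustrative only; example_phys_to_dma(), example_dma_to_phys() and dma_alloc_decrypted_sketch() are made-up names, and the two helper prototypes are repeated here only because the sketch does not assume which header the series declares them in. force_dma_unencrypted(), set_memory_decrypted(), __phys_to_dma()/__dma_to_phys() and phys_to_dma() are existing kernel interfaces of this era.

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/set_memory.h>

/* The new common helpers; declared in whichever x86 header the series picks. */
dma_addr_t __mem_encrypt_dma_set(dma_addr_t daddr, phys_addr_t paddr);
phys_addr_t __mem_encrypt_dma_clear(phys_addr_t paddr);

/*
 * Sketch: what the phys<->DMA conversion looks like once it calls the common
 * helpers instead of hard-coding the SME mask with __sme_set()/__sme_clr().
 */
static inline dma_addr_t example_phys_to_dma(struct device *dev,
					     phys_addr_t paddr)
{
	return __mem_encrypt_dma_set(__phys_to_dma(dev, paddr), paddr);
}

static inline phys_addr_t example_dma_to_phys(struct device *dev,
					      dma_addr_t daddr)
{
	return __mem_encrypt_dma_clear(__dma_to_phys(dev, daddr));
}

/*
 * Sketch: how a dma-direct style allocation honours force_dma_unencrypted().
 * If the device cannot address the encryption bits, the page is converted to
 * unencrypted and the DMA handle is built without those bits.
 */
static void *dma_alloc_decrypted_sketch(struct device *dev, gfp_t gfp,
					dma_addr_t *dma_handle)
{
	struct page *page = alloc_page(gfp);

	if (!page)
		return NULL;

	if (force_dma_unencrypted(dev)) {
		set_memory_decrypted((unsigned long)page_address(page), 1);
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}

	return page_address(page);
}

The design point of the split is that kernel/dma only ever sees force_dma_unencrypted() and the address-conversion hooks, while the C-bit versus KeyID details stay contained in mem_encrypt_common.c.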