diff mbox series

[RFC,v2,20/22] sev-guest: Stop changing encrypted page state for TDISP devices

Message ID 20250218111017.491719-21-aik@amd.com (mailing list archive)
State RFC
Delegated to: Bjorn Helgaas
Headers show
Series TSM: Secure VFIO, TDISP, SEV TIO | expand

Commit Message

Alexey Kardashevskiy Feb. 18, 2025, 11:10 a.m. UTC
At the moment DMA is assumed insecure, and either private memory is
converted into shared for the duration of DMA, or SWIOTLB is used.
With secure DMA enabled, neither is required.

Stop enforcing unencrypted DMA and SWIOTLB if the device is marked as
TDI enabled.

Signed-off-by: Alexey Kardashevskiy <aik@amd.com>
---
 include/linux/dma-direct.h | 8 ++++++++
 include/linux/swiotlb.h    | 8 ++++++++
 arch/x86/mm/mem_encrypt.c  | 6 ++++++
 3 files changed, 22 insertions(+)
diff mbox series

Patch

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index d7e30d4f7503..3bd533d2e65d 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -94,6 +94,14 @@  static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
  */
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
+#if defined(CONFIG_TSM_GUEST) || defined(CONFIG_TSM_GUEST_MODULE)
+	if (dev->tdi_enabled) {
+		dev_warn_once(dev, "(TIO) Disable SME");
+		if (!dev->tdi_validated)
+			dev_warn(dev, "TDI is not validated, DMA @%llx will fail", paddr);
+		return phys_to_dma_unencrypted(dev, paddr);
+	}
+#endif
 	return __sme_set(phys_to_dma_unencrypted(dev, paddr));
 }
 
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 3dae0f592063..67bea31fa42a 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -173,6 +173,14 @@  static inline bool is_swiotlb_force_bounce(struct device *dev)
 {
 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 
+#if defined(CONFIG_TSM_GUEST) || defined(CONFIG_TSM_GUEST_MODULE)
+	if (dev->tdi_enabled) {
+		dev_warn_once(dev, "(TIO) Disable SWIOTLB");
+		if (!dev->tdi_validated)
+			dev_warn(dev, "TDI is not validated");
+		return false;
+	}
+#endif
 	return mem && mem->force_bounce;
 }
 
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 95bae74fdab2..c9c99154bec9 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -19,6 +19,12 @@ 
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
 {
+#if defined(CONFIG_TSM_GUEST) || defined(CONFIG_TSM_GUEST_MODULE)
+	if (dev->tdi_enabled) {
+		dev_warn_once(dev, "(TIO) Disable decryption");
+		return false;
+	}
+#endif
 	/*
 	 * For SEV, all DMA must be to unencrypted addresses.
 	 */