@@ -21,4 +21,32 @@ static inline bool force_dma_unencrypted(struct device *dev)
 	return is_realm_world();
 }
 
+/*
+ * In a Realm, a device that emits untranslated bus addresses must reach
+ * shared buffers through the unprotected IPA alias, i.e. with the top
+ * "NS shared" bit set in the address.
+ */
+static inline dma_addr_t dma_decrypted(dma_addr_t daddr)
+{
+	if (is_realm_world())
+		daddr |= prot_ns_shared;
+	return daddr;
+}
+#define dma_decrypted dma_decrypted
+
+static inline dma_addr_t dma_encrypted(dma_addr_t daddr)
+{
+	/* Clear the alias bit to get back to the protected address */
+	if (is_realm_world())
+		daddr &= ~prot_ns_shared;
+	return daddr;
+}
+#define dma_encrypted dma_encrypted
+
+static inline dma_addr_t dma_clear_encryption(dma_addr_t daddr)
+{
+	return dma_encrypted(daddr);
+}
+#define dma_clear_encryption dma_clear_encryption
+
#endif /* __ASM_MEM_ENCRYPT_H */
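
For context, the "#define dma_decrypted dma_decrypted" markers are what
let the generic DMA code detect the architecture override and fall back
to an identity transform everywhere else. A minimal sketch of what the
generic fallback side could look like, assuming identity defaults next
to the other helpers in include/linux/dma-direct.h (illustrative, not
part of this hunk):

/* Identity defaults, used when the arch does not override the hooks */
#ifndef dma_decrypted
#define dma_decrypted(daddr)		(daddr)
#endif
#ifndef dma_encrypted
#define dma_encrypted(daddr)		(daddr)
#endif
#ifndef dma_clear_encryption
#define dma_clear_encryption(daddr)	(daddr)
#endif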
When a device performs DMA to a shared buffer using physical addresses
(i.e. without Stage 1 translation), the device must use the "{I}PA address"
with the top bit set in a Realm. This makes sure that a trusted device is
able to write to the shared buffers as well as the protected buffers. Thus,
a Realm must always program the full address, including the "protection"
bit, much like the AMD SME encryption bits.

Enable this by providing arm64-specific
dma_{encrypted,decrypted,clear_encryption} helpers for Realms. Please note
that the VMM needs to similarly make sure that the SMMU Stage 2 in the
Non-secure world is set up accordingly to map the IPA at the unprotected
alias.

Cc: Will Deacon <will@kernel.org>
Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Steven Price <steven.price@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
---
 arch/arm64/include/asm/mem_encrypt.h | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)
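
A quick sketch of how a caller would compose these helpers, for
illustration only: phys_to_dma_unencrypted() is the existing dma-direct
helper, while example_shared_bus_addr() is a hypothetical name used here
to show the intent; the actual wiring of the hooks into the generic code
is not shown in this patch:

#include <linux/dma-direct.h>

static inline dma_addr_t example_shared_bus_addr(struct device *dev,
						 phys_addr_t paddr)
{
	/*
	 * In a Realm this sets the top bit, i.e. produces the
	 * unprotected IPA alias, so the device can reach the shared
	 * buffer. Outside a Realm it is a no-op.
	 */
	return dma_decrypted(phys_to_dma_unencrypted(dev, paddr));
}

For example, with a hypothetical prot_ns_shared == BIT(47), a shared
buffer at IPA 0x8000_0000 would be programmed into the device as
0x8000_8000_0000, and dma_encrypted()/dma_clear_encryption() would map
that alias back to 0x8000_0000.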