
[RFC,v2,02/21] iommu/dma: Implement link/unlink ranges callbacks

Message ID: e3a8350baeaad544010c65dc62db53cf92ff2be1.1726138681.git.leon@kernel.org
State: Handled Elsewhere
Series: Provide a new two step DMA API mapping API

Commit Message

Leon Romanovsky Sept. 12, 2024, 11:15 a.m. UTC
From: Leon Romanovsky <leonro@nvidia.com>

Add an implementation of the link/unlink interface to map/unmap pages in
the fast path for pre-allocated IOVA.
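
For context, a rough sketch of the call flow these helpers are designed
for (illustrative only: the dma_iova_state initialization and the error
labels below are assumptions, not part of this patch):

	struct dma_iova_state state = {
		.dev = dev,		/* device performing DMA */
		.dir = DMA_TO_DEVICE,	/* transfer direction */
	};
	dma_addr_t addr;
	int ret;

	/* Reserve IOVA space once, up front, for the whole transfer */
	ret = iommu_dma_alloc_iova(&state, phys, size);
	if (ret)
		return ret;

	/* Handle deferred attach before any range is linked */
	ret = iommu_dma_start_range(dev);
	if (ret)
		goto err_free_iova;

	/* Map each physically contiguous range into the reserved IOVA */
	addr = iommu_dma_link_range(&state, phys, size, 0);
	if (dma_mapping_error(dev, addr))
		goto err_end_range;

	iommu_dma_end_range(dev);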

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 drivers/iommu/dma-iommu.c | 86 +++++++++++++++++++++++++++++++++++++++
 include/linux/iommu-dma.h | 25 ++++++++++++
 2 files changed, 111 insertions(+)

Patch

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 09deea2fc86b..72763f76b712 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1743,6 +1743,92 @@  void iommu_dma_free_iova(struct dma_iova_state *state)
 			      &iotlb_gather);
 }
 
+int iommu_dma_start_range(struct device *dev)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled))
+		return iommu_deferred_attach(dev, domain);
+
+	return 0;
+}
+
+void iommu_dma_end_range(struct device *dev)
+{
+	/* TODO: Factor out ops->iotlb_sync_map(..) call from iommu_map()
+	 * and put it here to provide batched iotlb sync for the range.
+	 */
+}
+
+dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
+				size_t size, unsigned long attrs)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(state->dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	size_t iova_off = iova_offset(iovad, phys);
+	bool coherent = dev_is_dma_coherent(state->dev);
+	int prot = dma_info_to_prot(state->dir, coherent, attrs);
+	dma_addr_t addr = state->addr + state->range_size;
+	int ret;
+
+	WARN_ON_ONCE(iova_off && state->range_size > 0);
+
+	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arch_sync_dma_for_device(phys, size, state->dir);
+
+	size = iova_align(iovad, size + iova_off);
+	ret = iommu_map(domain, addr, phys - iova_off, size, prot, GFP_ATOMIC);
+	if (ret)
+		return DMA_MAPPING_ERROR;
+
+	state->range_size += size;
+	return addr + iova_off;
+}
+
+static void iommu_sync_dma_for_cpu(struct iommu_domain *domain,
+				   dma_addr_t start, size_t size,
+				   enum dma_data_direction dir)
+{
+	size_t sync_size, unmapped = 0;
+	phys_addr_t phys;
+
+	do {
+		phys = iommu_iova_to_phys(domain, start + unmapped);
+		if (WARN_ON(!phys))
+			break;
+
+		sync_size = (unmapped + PAGE_SIZE > size) ? size % PAGE_SIZE :
+							    PAGE_SIZE;
+		arch_sync_dma_for_cpu(phys, sync_size, dir);
+		unmapped += sync_size;
+	} while (unmapped < size);
+}
+
+void iommu_dma_unlink_range(struct device *dev, dma_addr_t start, size_t size,
+			    enum dma_data_direction dir, unsigned long attrs)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	struct iommu_iotlb_gather iotlb_gather;
+	bool coherent = dev_is_dma_coherent(dev);
+	size_t unmapped;
+
+	iommu_iotlb_gather_init(&iotlb_gather);
+	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !coherent)
+		iommu_sync_dma_for_cpu(domain, start, size, dir);
+
+	size = iova_align(iovad, size);
+	unmapped = iommu_unmap_fast(domain, start, size, &iotlb_gather);
+	WARN_ON(unmapped != size);
+
+	if (!iotlb_gather.queued)
+		iommu_iotlb_sync(domain, &iotlb_gather);
+}
+
 void iommu_setup_dma_ops(struct device *dev)
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
diff --git a/include/linux/iommu-dma.h b/include/linux/iommu-dma.h
index 698df67b152a..21b0341f52b8 100644
--- a/include/linux/iommu-dma.h
+++ b/include/linux/iommu-dma.h
@@ -60,6 +60,12 @@  void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 int iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
 			 size_t size);
 void iommu_dma_free_iova(struct dma_iova_state *state);
+int iommu_dma_start_range(struct device *dev);
+void iommu_dma_end_range(struct device *dev);
+dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
+				size_t size, unsigned long attrs);
+void iommu_dma_unlink_range(struct device *dev, dma_addr_t start, size_t size,
+			    enum dma_data_direction dir, unsigned long attrs);
 #else
 static inline bool use_dma_iommu(struct device *dev)
 {
@@ -184,5 +190,24 @@  static inline int iommu_dma_alloc_iova(struct dma_iova_state *state,
 static inline void iommu_dma_free_iova(struct dma_iova_state *state)
 {
 }
+static inline int iommu_dma_start_range(struct device *dev)
+{
+	return -EOPNOTSUPP;
+}
+static inline void iommu_dma_end_range(struct device *dev)
+{
+}
+static inline dma_addr_t iommu_dma_link_range(struct dma_iova_state *state,
+					      phys_addr_t phys, size_t size,
+					      unsigned long attrs)
+{
+	return DMA_MAPPING_ERROR;
+}
+static inline void iommu_dma_unlink_range(struct device *dev, dma_addr_t start,
+					  size_t size,
+					  enum dma_data_direction dir,
+					  unsigned long attrs)
+{
+}
 #endif /* CONFIG_IOMMU_DMA */
 #endif /* _LINUX_IOMMU_DMA_H */
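
Teardown mirrors the link flow, and the !CONFIG_IOMMU_DMA stubs above give
callers a way to detect the unsupported case. A short sketch (again
illustrative; only the function and field names come from this series):

	/* Unmap the linked ranges, then release the IOVA reservation */
	iommu_dma_unlink_range(dev, state.addr, state.range_size,
			       state.dir, 0);
	iommu_dma_free_iova(&state);

	/*
	 * Without CONFIG_IOMMU_DMA, iommu_dma_start_range() returns
	 * -EOPNOTSUPP and iommu_dma_link_range() returns DMA_MAPPING_ERROR,
	 * so callers can fall back to the regular dma_map_page()/
	 * dma_map_sg() path.
	 */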