
[RFC,v1,04/18] dma-mapping: implement link range API

Message ID: 8944a1211b243fed1234a56bc8004a11dbf85a87.1719909395.git.leon@kernel.org (mailing list archive)
State: Superseded
Delegated to: Bjorn Helgaas
Series: Provide a new two step DMA API mapping API

Commit Message

Leon Romanovsky July 2, 2024, 9:09 a.m. UTC
From: Leon Romanovsky <leonro@nvidia.com>

Introduce new DMA APIs that allow layers above the DMA core to link
buffer ranges into a previously allocated IOVA space.

With the proposed API, callers perform the following steps:
dma_alloc_iova()
if (dma_can_use_iova(...))
  dma_start_range(...)
  for (page in range)
     dma_link_range(...)
  dma_end_range(...)
else
  /* Fallback to legacy map pages */
  dma_map_page(...)
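As an illustration only (not part of this patch), a caller-side sketch of
the flow above could look as follows. The helper name is hypothetical, the
setup of struct dma_iova_attrs / struct dma_iova_state is assumed to have
been done by the caller as introduced in earlier patches of this series,
and release of the IOVA on the error/fallback paths is omitted for brevity:

static int example_map_pages(struct device *dev, struct page **pages,
			     unsigned int npages, enum dma_data_direction dir,
			     struct dma_iova_state *state)
{
	size_t size = (size_t)npages << PAGE_SHIFT;
	unsigned int i;
	int ret;

	/* Reserve a contiguous IOVA range (attrs filled in by the caller). */
	ret = dma_alloc_iova(state->iova);
	if (ret)
		return ret;

	if (dma_can_use_iova(state, size)) {
		ret = dma_start_range(state);
		if (ret)
			return ret;

		for (i = 0; i < npages; i++) {
			/* Each page lands at iova->addr + state->range_size. */
			ret = dma_link_range(state, page_to_phys(pages[i]),
					     PAGE_SIZE);
			if (ret)
				break;
		}
		dma_end_range(state);
		return ret;
	}

	/* Fallback to the legacy per-page mapping path. */
	for (i = 0; i < npages; i++) {
		dma_addr_t addr = dma_map_page(dev, pages[i], 0, PAGE_SIZE, dir);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;
		/* The caller is expected to store addr for dma_unmap_page(). */
	}
	return 0;
}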

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 include/linux/dma-map-ops.h |  6 +++
 include/linux/dma-mapping.h | 22 +++++++++++
 kernel/dma/mapping.c        | 78 ++++++++++++++++++++++++++++++++++++-
 3 files changed, 105 insertions(+), 1 deletion(-)

Patch

diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index b52e9c8db241..4868586b015e 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -90,6 +90,12 @@  struct dma_map_ops {
 
 	dma_addr_t (*alloc_iova)(struct device *dev, size_t size);
 	void (*free_iova)(struct device *dev, dma_addr_t dma_addr, size_t size);
+	int (*link_range)(struct dma_iova_state *state, phys_addr_t phys,
+			  dma_addr_t addr, size_t size);
+	void (*unlink_range)(struct dma_iova_state *state,
+			     dma_addr_t dma_handle, size_t size);
+	int (*start_range)(struct dma_iova_state *state);
+	void (*end_range)(struct dma_iova_state *state);
 };
 
 #ifdef CONFIG_DMA_OPS
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 9d1e020869a6..c530095ff232 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -11,6 +11,7 @@ 
 #include <linux/scatterlist.h>
 #include <linux/bug.h>
 #include <linux/mem_encrypt.h>
+#include <linux/iommu.h>
 
 /**
  * List of possible attributes associated with a DMA mapping. The semantics
@@ -103,6 +104,8 @@  struct dma_iova_attrs {
 struct dma_iova_state {
 	struct dma_iova_attrs *iova;
 	struct dma_memory_type *type;
+	struct iommu_domain *domain;
+	size_t range_size;
 };
 
 #ifdef CONFIG_DMA_API_DEBUG
@@ -184,6 +187,10 @@  int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
 
 void dma_get_memory_type(struct page *page, struct dma_memory_type *type);
 bool dma_can_use_iova(struct dma_iova_state *state, size_t size);
+int dma_start_range(struct dma_iova_state *state);
+void dma_end_range(struct dma_iova_state *state);
+int dma_link_range(struct dma_iova_state *state, phys_addr_t phys, size_t size);
+void dma_unlink_range(struct dma_iova_state *state);
 #else /* CONFIG_HAS_DMA */
 static inline int dma_alloc_iova(struct dma_iova_attrs *iova)
 {
@@ -329,6 +336,21 @@  static inline bool dma_can_use_iova(struct dma_iova_state *state, size_t size)
 {
 	return false;
 }
+static inline int dma_start_range(struct dma_iova_state *state)
+{
+	return -EOPNOTSUPP;
+}
+static inline void dma_end_range(struct dma_iova_state *state)
+{
+}
+static inline int dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
+				 size_t size)
+{
+	return -EOPNOTSUPP;
+}
+static inline void dma_unlink_range(struct dma_iova_state *state)
+{
+}
 #endif /* CONFIG_HAS_DMA */
 
 #if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 9044ee525fdb..089b4a977bab 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -989,7 +989,8 @@  bool dma_can_use_iova(struct dma_iova_state *state, size_t size)
 	    dev_use_swiotlb(dev, size, state->iova->dir))
 		return false;
 
-	if (dma_map_direct(dev, ops) || !ops->alloc_iova)
+	if (dma_map_direct(dev, ops) || !ops->alloc_iova || !ops->link_range ||
+	    !ops->start_range)
 		return false;
 
 	if (type->type == DMA_MEMORY_TYPE_P2P) {
@@ -1000,3 +1001,78 @@  bool dma_can_use_iova(struct dma_iova_state *state, size_t size)
 	return type->type == DMA_MEMORY_TYPE_NORMAL;
 }
 EXPORT_SYMBOL_GPL(dma_can_use_iova);
+
+/**
+ * dma_start_range - Start a range of IOVA space
+ * @state: IOVA state
+ *
+ * Start a range of IOVA space for the given IOVA state.
+ */
+int dma_start_range(struct dma_iova_state *state)
+{
+	struct device *dev = state->iova->dev;
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (!ops->start_range)
+		return 0;
+
+	return ops->start_range(state);
+}
+EXPORT_SYMBOL_GPL(dma_start_range);
+
+/**
+ * dma_end_range - End a range of IOVA space
+ * @state: IOVA state
+ *
+ * End a range of IOVA space for the given IOVA state.
+ */
+void dma_end_range(struct dma_iova_state *state)
+{
+	struct device *dev = state->iova->dev;
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (!ops->end_range)
+		return;
+
+	ops->end_range(state);
+}
+EXPORT_SYMBOL_GPL(dma_end_range);
+
+/**
+ * dma_link_range - Link a range of IOVA space
+ * @state: IOVA state
+ * @phys: physical address to link
+ * @size: size of the buffer
+ *
+ * Link a range of IOVA space for the given IOVA state.
+ */
+int dma_link_range(struct dma_iova_state *state, phys_addr_t phys, size_t size)
+{
+	struct device *dev = state->iova->dev;
+	dma_addr_t addr = state->iova->addr + state->range_size;
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	int ret;
+
+	ret = ops->link_range(state, phys, addr, size);
+	if (ret)
+		return ret;
+
+	state->range_size += size;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dma_link_range);
+
+/**
+ * dma_unlink_range - Unlink a range of IOVA space
+ * @state: IOVA state
+ *
+ * Unlink a range of IOVA space for the given IOVA state.
+ */
+void dma_unlink_range(struct dma_iova_state *state)
+{
+	struct device *dev = state->iova->dev;
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	ops->unlink_range(state, state->iova->addr, state->range_size);
+}
+EXPORT_SYMBOL_GPL(dma_unlink_range);
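
For completeness, a hypothetical teardown counterpart to the mapping sketch
in the commit message (again, not part of this patch): a caller that took
the IOVA path undoes the whole linked range in one call and then releases
the IOVA allocation. dma_free_iova() is assumed here to be the wrapper
around the ->free_iova callback introduced earlier in this series:

static void example_unmap_pages(struct dma_iova_state *state)
{
	/* Unlinks all state->range_size bytes starting at state->iova->addr. */
	dma_unlink_range(state);

	/* Return the IOVA range reserved by dma_alloc_iova(). */
	dma_free_iova(state->iova);
}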