
[04/14] RDMA/umem: Add rdma_umem_for_each_dma_block()

Message ID 4-v1-00f59ce24f1f+19f50-umem_1_jgg@nvidia.com (mailing list archive)
State Superseded
Series RDMA: Improve use of umem in DMA drivers

Commit Message

Jason Gunthorpe Sept. 2, 2020, 12:43 a.m. UTC
This helper does the same as rdma_for_each_block(), except it works on a
umem. This simplifies most of the call sites.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 .clang-format                              |  1 +
 drivers/infiniband/hw/bnxt_re/ib_verbs.c   |  2 +-
 drivers/infiniband/hw/efa/efa_verbs.c      |  3 +--
 drivers/infiniband/hw/hns/hns_roce_alloc.c |  3 +--
 drivers/infiniband/hw/i40iw/i40iw_verbs.c  |  3 +--
 include/rdma/ib_umem.h                     | 20 ++++++++++++++++++++
 6 files changed, 25 insertions(+), 7 deletions(-)
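
[Editorial illustration, not part of the patch: a minimal sketch of the intended call pattern, pairing the new iterator with ib_umem_find_best_pgsz(). The function my_fill_pbl(), the page-size bitmap and the pbl buffer are hypothetical names invented for the example.]

#include <linux/sizes.h>
#include <rdma/ib_umem.h>

/* Hypothetical driver helper: fill a physical buffer list from a umem */
static int my_fill_pbl(struct ib_umem *umem, u64 iova, u64 *pbl)
{
	struct ib_block_iter biter;
	unsigned long pgsz;

	/* Pick the largest supported page size that can represent this umem */
	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M | SZ_1G, iova);
	if (!pgsz)
		return -EINVAL;

	/* Each iteration yields the DMA address of one pgsz-aligned block */
	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
		*pbl++ = rdma_block_iter_dma_address(&biter);

	return 0;
}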

Comments

Miguel Ojeda Sept. 2, 2020, 3:10 a.m. UTC | #1
On Wed, Sep 2, 2020 at 2:43 AM Jason Gunthorpe <jgg@nvidia.com> wrote:
>
>  .clang-format                              |  1 +

Acked-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>

Cheers,
Miguel
Shiraz Saleem Sept. 3, 2020, 2:12 p.m. UTC | #2
> Subject: [PATCH 04/14] RDMA/umem: Add rdma_umem_for_each_dma_block()
> 
> This helper does the same as rdma_for_each_block(), except it works on a umem.
> This simplifies most of the call sites.
> 
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---

[...]

> diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
> index b51339328a51ef..beb611b157bc8d 100644
> --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
> +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
> @@ -1320,8 +1320,7 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
>  	if (iwmr->type == IW_MEMREG_TYPE_QP)
>  		iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
> 
> -	rdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,
> -			    iwmr->page_size) {
> +	rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
>  		*pbl = rdma_block_iter_dma_address(&biter);
>  		pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
>  	}

Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>


[....]
> +static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
> +						struct ib_umem *umem,
> +						unsigned long pgsz)
> +{
> +	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
> +}
> +
> +/**
> + * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
> + * @umem: umem to iterate over
> + * @pgsz: Page size to split the list into
> + *
> + * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz().

>= ?
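
[Editorial note: to make the kernel-doc being discussed above concrete, a hedged worked example of the ALIGN_DOWN()/ALIGN() range arithmetic; the address, length and page size below are invented for illustration.]

/*
 * Worked example (made-up values, not from the patch): assume
 * umem->address = 0x201100, umem->length = 0x2f00 and pgsz = 4096.
 *
 *   start = ALIGN_DOWN(0x201100, 0x1000)         = 0x201000
 *   end   = ALIGN(0x201100 + 0x2f00, 0x1000)     = 0x204000
 *
 * The iterator therefore produces three 4K-aligned DMA blocks: the
 * partially used first and last pages are rounded out to full blocks.
 */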

Patch

diff --git a/.clang-format b/.clang-format
index a0a96088c74f49..311ef2c61a1bdf 100644
--- a/.clang-format
+++ b/.clang-format
@@ -415,6 +415,7 @@  ForEachMacros:
   - 'rbtree_postorder_for_each_entry_safe'
   - 'rdma_for_each_block'
   - 'rdma_for_each_port'
+  - 'rdma_umem_for_each_dma_block'
   - 'resource_list_for_each_entry'
   - 'resource_list_for_each_entry_safe'
   - 'rhl_for_each_entry_rcu'
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 5ee272d27aaade..9e26e651730cb3 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3783,7 +3783,7 @@  static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
 	u64 page_size =  BIT_ULL(page_shift);
 	struct ib_block_iter biter;
 
-	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
+	rdma_umem_for_each_dma_block(umem, &biter, page_size)
 		*pbl_tbl++ = rdma_block_iter_dma_address(&biter);
 
 	return pbl_tbl - pbl_tbl_orig;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index de9a22f0fcc218..d85c63a5021a70 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1142,8 +1142,7 @@  static int umem_to_page_list(struct efa_dev *dev,
 	ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
 		  hp_cnt, pages_in_hp);
 
-	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
-			    BIT(hp_shift))
+	rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
 		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
 
 	return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index a522cb2d29eabc..a6b23dec1adcf6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -268,8 +268,7 @@  int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
 	}
 
 	/* convert system page cnt to hw page cnt */
-	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
-			    1 << page_shift) {
+	rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
 		addr = rdma_block_iter_dma_address(&biter);
 		if (idx >= start) {
 			bufs[total++] = addr;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index b51339328a51ef..beb611b157bc8d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1320,8 +1320,7 @@  static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 	if (iwmr->type == IW_MEMREG_TYPE_QP)
 		iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
 
-	rdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,
-			    iwmr->page_size) {
+	rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
 		*pbl = rdma_block_iter_dma_address(&biter);
 		pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
 	}
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 07a764eb692eed..b880512ba95f16 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -40,6 +40,26 @@  static inline size_t ib_umem_num_pages(struct ib_umem *umem)
 	       PAGE_SHIFT;
 }
 
+static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+						struct ib_umem *umem,
+						unsigned long pgsz)
+{
+	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
+}
+
+/**
+ * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
+ * @umem: umem to iterate over
+ * @pgsz: Page size to split the list into
+ *
+ * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
+ * returned DMA blocks will be aligned to pgsz and span the range:
+ * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
+ */
+#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
+	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
+	     __rdma_block_iter_next(biter);)
+
 #ifdef CONFIG_INFINIBAND_USER_MEM
 
 struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,