
[13/14] RDMA/mlx5: Use ib_umem_num_dma_blocks()

Message ID 13-v1-00f59ce24f1f+19f50-umem_1_jgg@nvidia.com (mailing list archive)
State Superseded
Series RDMA: Improve use of umem in DMA drivers

Commit Message

Jason Gunthorpe Sept. 2, 2020, 12:43 a.m. UTC
For the calls linked to mlx4_ib_umem_calc_optimal_mtt_size() compute the
number of DMA pages directly based on the returned page_shift. This was
just being used as a weird default if the algorithm hits the lower limit.

All other places are just using it with PAGE_SIZE.

As this is the last call site, remove ib_umem_num_pages().

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/infiniband/core/umem.c       | 12 ------------
 drivers/infiniband/hw/mlx4/cq.c      |  8 ++++----
 drivers/infiniband/hw/mlx4/mlx4_ib.h |  3 +--
 drivers/infiniband/hw/mlx4/mr.c      | 14 ++++++--------
 drivers/infiniband/hw/mlx4/qp.c      | 17 +++++++++--------
 drivers/infiniband/hw/mlx4/srq.c     |  5 +++--
 include/rdma/ib_umem.h               |  2 --
 7 files changed, 23 insertions(+), 38 deletions(-)

Comments

Gal Pressman Sept. 2, 2020, 9:07 a.m. UTC | #1
On 02/09/2020 3:43, Jason Gunthorpe wrote:
> For the calls linked to mlx4_ib_umem_calc_optimal_mtt_size() compute the
> number of DMA pages directly based on the returned page_shift. This was
> just being used as a weird default if the algorithm hits the lower limit.
> 
> All other places are just using it with PAGE_SIZE.
> 
> As this is the last call site, remove ib_umem_num_pages().
> 
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

The subject line should be fixed to mlx4.
Shiraz Saleem Sept. 3, 2020, 3:14 p.m. UTC | #2
> Subject: [PATCH 13/14] RDMA/mlx5: Use ib_umem_num_dma_blocks()
> 
> For the calls linked to mlx4_ib_umem_calc_optimal_mtt_size() compute the number
> of DMA pages directly based on the returned page_shift. This was just being used
> as a weird default if the algorithm hits the lower limit.
> 
> All other places are just using it with PAGE_SIZE.
> 
> As this is the last call site, remove ib_umem_num_pages().

Typo, it should say: remove ib_umem_page_count()

Patch

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index f02e34cac59581..49d6ddc37b6fde 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -346,18 +346,6 @@  void ib_umem_release(struct ib_umem *umem)
 }
 EXPORT_SYMBOL(ib_umem_release);
 
-int ib_umem_page_count(struct ib_umem *umem)
-{
-	int i, n = 0;
-	struct scatterlist *sg;
-
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
-		n += sg_dma_len(sg) >> PAGE_SHIFT;
-
-	return n;
-}
-EXPORT_SYMBOL(ib_umem_page_count);
-
 /*
  * Copy from the given ib_umem's pages to the given buffer.
  *
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 8a3436994f8097..3de97f428dc63b 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -142,16 +142,16 @@  static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
 	int err;
 	int cqe_size = dev->dev->caps.cqe_size;
 	int shift;
-	int n;
 
 	*umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,
 			    IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);
 
-	n = ib_umem_page_count(*umem);
-	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
-	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
+	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0);
+	err = mlx4_mtt_init(dev->dev,
+			    ib_umem_num_dma_blocks(*umem, 1UL << shift), shift,
+			    &buf->mtt);
 
 	if (err)
 		goto err_buf;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index bcac8fc5031766..660955a11914e7 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -905,7 +905,6 @@  struct ib_rwq_ind_table
 			      struct ib_rwq_ind_table_init_attr *init_attr,
 			      struct ib_udata *udata);
 int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
-int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
-				       int *num_of_mtts);
+int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va);
 
 #endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 1d5ef0de12c950..19b2d3fbe81e03 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -254,8 +254,7 @@  int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
  * middle already handled as part of mtt shift calculation for both their start
  * & end addresses.
  */
-int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
-				       int *num_of_mtts)
+int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va)
 {
 	u64 block_shift = MLX4_MAX_MTT_SHIFT;
 	u64 min_shift = PAGE_SHIFT;
@@ -353,7 +352,6 @@  int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
 		pr_warn("misaligned total length detected (%llu, %llu)!",
 			total_len, block_shift);
 
-	*num_of_mtts = total_len >> block_shift;
 end:
 	if (block_shift < min_shift) {
 		/*
@@ -409,7 +407,6 @@  struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	struct mlx4_ib_mr *mr;
 	int shift;
 	int err;
-	int n;
 
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
@@ -421,11 +418,12 @@  struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		goto err_free;
 	}
 
-	n = ib_umem_page_count(mr->umem);
-	shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
+	shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start);
 
 	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
-			    convert_access(access_flags), n, shift, &mr->mmr);
+			    convert_access(access_flags),
+			    ib_umem_num_dma_blocks(mr->umem, 1UL << shift),
+			    shift, &mr->mmr);
 	if (err)
 		goto err_umem;
 
@@ -511,7 +509,7 @@  int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 			mmr->umem = NULL;
 			goto release_mpt_entry;
 		}
-		n = ib_umem_page_count(mmr->umem);
+		n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);
 		shift = PAGE_SHIFT;
 
 		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 2975f350b9fd10..3113d9ca112771 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -869,7 +869,6 @@  static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 	struct mlx4_ib_create_wq wq;
 	size_t copy_len;
 	int shift;
-	int n;
 
 	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
 
@@ -922,9 +921,10 @@  static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 		goto err;
 	}
 
-	n = ib_umem_page_count(qp->umem);
-	shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
-	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+	shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0);
+	err = mlx4_mtt_init(dev->dev,
+			    ib_umem_num_dma_blocks(qp->umem, 1UL << shift),
+			    shift, &qp->mtt);
 
 	if (err)
 		goto err_buf;
@@ -1077,7 +1077,6 @@  static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 		struct mlx4_ib_create_qp ucmd;
 		size_t copy_len;
 		int shift;
-		int n;
 
 		copy_len = sizeof(struct mlx4_ib_create_qp);
 
@@ -1117,9 +1116,11 @@  static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 			goto err;
 		}
 
-		n = ib_umem_page_count(qp->umem);
-		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
-		err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0);
+		err = mlx4_mtt_init(dev->dev,
+				    ib_umem_num_dma_blocks(qp->umem,
+							   1UL << shift),
+				    shift, &qp->mtt);
 
 		if (err)
 			goto err_buf;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 8f9d5035142d33..108b2d0118d064 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -115,8 +115,9 @@  int mlx4_ib_create_srq(struct ib_srq *ib_srq,
 		if (IS_ERR(srq->umem))
 			return PTR_ERR(srq->umem);
 
-		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
-				    PAGE_SHIFT, &srq->mtt);
+		err = mlx4_mtt_init(
+			dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),
+			PAGE_SHIFT, &srq->mtt);
 		if (err)
 			goto err_buf;
 
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index ba3b9be0d8c56a..4bac6e29f030c2 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -73,7 +73,6 @@  static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
 struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 			    size_t size, int access);
 void ib_umem_release(struct ib_umem *umem);
-int ib_umem_page_count(struct ib_umem *umem);
 int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
 		      size_t length);
 unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
@@ -91,7 +90,6 @@  static inline struct ib_umem *ib_umem_get(struct ib_device *device,
 	return ERR_PTR(-EINVAL);
 }
 static inline void ib_umem_release(struct ib_umem *umem) { }
-static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; }
 static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
 		      		    size_t length) {
 	return -EINVAL;