[rdma-next,1/2] RDMA/mlx4: Use ib_umem_find_best_pgsz() to calculate MTT size

Message ID c39ec6f5d4664c439a72f2961728ebb5895a9f07.1733233299.git.leonro@nvidia.com (mailing list archive)
State New
Series [rdma-next,1/2] RDMA/mlx4: Use ib_umem_find_best_pgsz() to calculate MTT size

Commit Message

Leon Romanovsky Dec. 3, 2024, 1:42 p.m. UTC
From: Leon Romanovsky <leonro@nvidia.com>

Convert mlx4 to use ib_umem_find_best_pgsz() instead of an open-coded
variant to calculate the MTT size.

Unlike the removed open-coded logic, which clamped the result to a
minimum shift, the helper can fail outright when no supported page size
fits the umem, so callers now check the returned shift for a negative
errno before calling mlx4_mtt_init().

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 drivers/infiniband/hw/mlx4/cq.c      |   6 +-
 drivers/infiniband/hw/mlx4/mlx4_ib.h |  18 ++-
 drivers/infiniband/hw/mlx4/mr.c      | 167 +--------------------------
 drivers/infiniband/hw/mlx4/qp.c      |  12 +-
 4 files changed, 35 insertions(+), 168 deletions(-)
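
At every call site the conversion follows the same pattern: the helper now
returns a negative errno when ib_umem_find_best_pgsz() finds no supported
page size, so the returned shift must be checked before it is handed to
mlx4_mtt_init(). A condensed sketch of that pattern (error labels and
surrounding driver code elided; see the hunks below for the real context):

	shift = mlx4_ib_umem_calc_optimal_mtt_size(umem, start, &n);
	if (shift < 0) {
		/* No supported page size fits, typically -EOPNOTSUPP */
		err = shift;
		goto err_buf;
	}

	err = mlx4_mtt_init(dev->dev, n, shift, &mtt);
	if (err)
		goto err_buf;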

Patch

diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index aa9ea6ba26e5..c592374f4a58 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -150,8 +150,12 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev,
 		return PTR_ERR(*umem);
 
 	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
-	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
+	if (shift < 0) {
+		err = shift;
+		goto err_buf;
+	}
 
+	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
 	if (err)
 		goto err_buf;
 
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index b52bceff7d97..f53b1846594c 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -667,6 +667,9 @@ struct mlx4_uverbs_ex_query_device {
 	__u32 reserved;
 };
 
+/* 4k - 2G: GENMASK_ULL(31, 12) covers page sizes 2^12 through 2^31 */
+#define MLX4_PAGE_SIZE_SUPPORTED	((unsigned long)GENMASK_ULL(31, 12))
+
 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
 {
 	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
@@ -936,8 +939,19 @@ mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
 {
 	return 0;
 }
-int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
-				       int *num_of_mtts);
+static inline int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
+						     u64 start,
+						     int *num_of_mtts)
+{
+	unsigned long pg_sz;
+
+	pg_sz = ib_umem_find_best_pgsz(umem, MLX4_PAGE_SIZE_SUPPORTED, start);
+	if (!pg_sz)
+		return -EOPNOTSUPP;
+
+	*num_of_mtts = ib_umem_num_dma_blocks(umem, pg_sz);
+	return order_base_2(pg_sz);
+}
 
 int mlx4_ib_cm_init(void);
 void mlx4_ib_cm_destroy(void);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index a40bf58bcdd3..819c98562e6a 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -87,10 +87,6 @@ struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
 	return ERR_PTR(err);
 }
 
-enum {
-	MLX4_MAX_MTT_SHIFT = 31
-};
-
 static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
 					struct mlx4_mtt *mtt,
 					u64 mtt_size, u64 mtt_shift, u64 len,
@@ -144,41 +140,6 @@ static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
 	return 0;
 }
 
-static inline u64 alignment_of(u64 ptr)
-{
-	return ilog2(ptr & (~(ptr - 1)));
-}
-
-static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
-				       u64 current_block_end,
-				       u64 block_shift)
-{
-	/* Check whether the alignment of the new block is aligned as well as
-	 * the previous block.
-	 * Block address must start with zeros till size of entity_size.
-	 */
-	if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
-		/*
-		 * It is not as well aligned as the previous block-reduce the
-		 * mtt size accordingly. Here we take the last right bit which
-		 * is 1.
-		 */
-		block_shift = alignment_of(next_block_start);
-
-	/*
-	 * Check whether the alignment of the end of previous block - is it
-	 * aligned as well as the start of the block
-	 */
-	if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
-		/*
-		 * It is not as well aligned as the start of the block -
-		 * reduce the mtt size accordingly.
-		 */
-		block_shift = alignment_of(current_block_end);
-
-	return block_shift;
-}
-
 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
 			   struct ib_umem *umem)
 {
@@ -245,130 +206,6 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
 	return err;
 }
 
-/*
- * Calculate optimal mtt size based on contiguous pages.
- * Function will return also the number of pages that are not aligned to the
- * calculated mtt_size to be added to total number of pages. For that we should
- * check the first chunk length & last chunk length and if not aligned to
- * mtt_size we should increment the non_aligned_pages number. All chunks in the
- * middle already handled as part of mtt shift calculation for both their start
- * & end addresses.
- */
-int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
-				       int *num_of_mtts)
-{
-	u64 block_shift = MLX4_MAX_MTT_SHIFT;
-	u64 min_shift = PAGE_SHIFT;
-	u64 last_block_aligned_end = 0;
-	u64 current_block_start = 0;
-	u64 first_block_start = 0;
-	u64 current_block_len = 0;
-	u64 last_block_end = 0;
-	struct scatterlist *sg;
-	u64 current_block_end;
-	u64 misalignment_bits;
-	u64 next_block_start;
-	u64 total_len = 0;
-	int i;
-
-	*num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
-
-	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
-		/*
-		 * Initialization - save the first chunk start as the
-		 * current_block_start - block means contiguous pages.
-		 */
-		if (current_block_len == 0 && current_block_start == 0) {
-			current_block_start = sg_dma_address(sg);
-			first_block_start = current_block_start;
-			/*
-			 * Find the bits that are different between the physical
-			 * address and the virtual address for the start of the
-			 * MR.
-			 * umem_get aligned the start_va to a page boundary.
-			 * Therefore, we need to align the start va to the same
-			 * boundary.
-			 * misalignment_bits is needed to handle the  case of a
-			 * single memory region. In this case, the rest of the
-			 * logic will not reduce the block size.  If we use a
-			 * block size which is bigger than the alignment of the
-			 * misalignment bits, we might use the virtual page
-			 * number instead of the physical page number, resulting
-			 * in access to the wrong data.
-			 */
-			misalignment_bits =
-				(start_va & (~(((u64)(PAGE_SIZE)) - 1ULL))) ^
-				current_block_start;
-			block_shift = min(alignment_of(misalignment_bits),
-					  block_shift);
-		}
-
-		/*
-		 * Go over the scatter entries and check if they continue the
-		 * previous scatter entry.
-		 */
-		next_block_start = sg_dma_address(sg);
-		current_block_end = current_block_start	+ current_block_len;
-		/* If we have a split (non-contig.) between two blocks */
-		if (current_block_end != next_block_start) {
-			block_shift = mlx4_ib_umem_calc_block_mtt
-					(next_block_start,
-					 current_block_end,
-					 block_shift);
-
-			/*
-			 * If we reached the minimum shift for 4k page we stop
-			 * the loop.
-			 */
-			if (block_shift <= min_shift)
-				goto end;
-
-			/*
-			 * If not saved yet we are in first block - we save the
-			 * length of first block to calculate the
-			 * non_aligned_pages number at the end.
-			 */
-			total_len += current_block_len;
-
-			/* Start a new block */
-			current_block_start = next_block_start;
-			current_block_len = sg_dma_len(sg);
-			continue;
-		}
-		/* The scatter entry is another part of the current block,
-		 * increase the block size.
-		 * An entry in the scatter can be larger than 4k (page) as of
-		 * dma mapping which merge some blocks together.
-		 */
-		current_block_len += sg_dma_len(sg);
-	}
-
-	/* Account for the last block in the total len */
-	total_len += current_block_len;
-	/* Add to the first block the misalignment that it suffers from. */
-	total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
-	last_block_end = current_block_start + current_block_len;
-	last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
-	total_len += (last_block_aligned_end - last_block_end);
-
-	if (total_len & ((1ULL << block_shift) - 1ULL))
-		pr_warn("misaligned total length detected (%llu, %llu)!",
-			total_len, block_shift);
-
-	*num_of_mtts = total_len >> block_shift;
-end:
-	if (block_shift < min_shift) {
-		/*
-		 * If shift is less than the min we set a warning and return the
-		 * min shift.
-		 */
-		pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n", block_shift);
-
-		block_shift = min_shift;
-	}
-	return block_shift;
-}
-
 static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
 					u64 length, int access_flags)
 {
@@ -424,6 +261,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 
 	shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
+	if (shift < 0) {
+		err = shift;
+		goto err_umem;
+	}
 
 	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
 			    convert_access(access_flags), n, shift, &mr->mmr);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 9d08aa99f3cb..50fd407103c7 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -925,8 +925,12 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 	}
 
 	shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
-	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+	if (shift < 0) {
+		err = shift;
+		goto err_buf;
+	}
 
+	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 	if (err)
 		goto err_buf;
 
@@ -1108,8 +1112,12 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 		}
 
 		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
-		err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+		if (shift < 0) {
+			err = shift;
+			goto err_buf;
+		}
 
+		err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 		if (err)
 			goto err_buf;
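
As a footnote on the arithmetic in the new inline helper: order_base_2() of
the chosen page size gives the MTT shift, and ib_umem_num_dma_blocks()
counts how many pages of that size cover the region once its start and end
are aligned down and up respectively. A standalone sketch (plain C with
hypothetical figures, not part of the patch) of how the two values fall out:

	#include <stdio.h>

	int main(void)
	{
		/* Suppose ib_umem_find_best_pgsz() settled on 2MB pages
		 * for a 5MB region; both figures are made up. */
		unsigned long long pg_sz = 1ULL << 21;
		unsigned long long iova = 0x7f3200001000ULL;
		unsigned long long len = 5ULL << 20;
		unsigned long long first = iova & ~(pg_sz - 1);
		unsigned long long last = (iova + len + pg_sz - 1) & ~(pg_sz - 1);

		/* ib_umem_num_dma_blocks(): pg_sz-sized blocks covering the region */
		printf("num_of_mtts = %llu\n", (last - first) / pg_sz);
		/* order_base_2(): log2 of the page size, i.e. the MTT shift */
		printf("shift = %d\n", 63 - __builtin_clzll(pg_sz));
		return 0;
	}

For this layout it prints "num_of_mtts = 3" and "shift = 21".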