
[mlx5-next,v1,3/5] net/mlx5: Manage ICM type of SW encap

Message ID bed5121255918eb132a1334141c76a0594df8143.1701871118.git.leon@kernel.org (mailing list archive)
State Accepted
Series: Expose c0 and SW encap ICM for RDMA

Commit Message

Leon Romanovsky Dec. 6, 2023, 2:01 p.m. UTC
From: Shun Hao <shunh@nvidia.com>

Support allocating and deallocating memory of the new SW encap ICM type.
This ICM type is used for encap contexts that are allocated and managed by
SW instead of FW, which raises the maximum number of encap contexts and
speeds up their allocation.

Signed-off-by: Shun Hao <shunh@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../net/ethernet/mellanox/mlx5/core/lib/dm.c  | 38 ++++++++++++++++++-
 1 file changed, 37 insertions(+), 1 deletion(-)
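
As a usage sketch (not part of this patch): a consumer such as the RDMA driver
later in this series could allocate and free an SW encap ICM chunk through the
existing mlx5_dm_sw_icm_alloc()/mlx5_dm_sw_icm_dealloc() helpers with the new
MLX5_SW_ICM_TYPE_SW_ENCAP type. The chunk size, alignment and uid below are
illustrative assumptions, not taken from this series.

#include <linux/mlx5/driver.h>
#include <linux/sizes.h>

/* Hypothetical caller: grab one SW-managed encap ICM chunk and release it.
 * Only MLX5_SW_ICM_TYPE_SW_ENCAP comes from this patch; everything else is
 * an assumed example.
 */
static int example_sw_encap_icm(struct mlx5_core_dev *dev)
{
	u64 len = SZ_4K;	/* assumed chunk size */
	phys_addr_t addr;
	u32 obj_id;
	int err;

	err = mlx5_dm_sw_icm_alloc(dev, MLX5_SW_ICM_TYPE_SW_ENCAP, len,
				   0 /* log_alignment */, 0 /* uid */,
				   &addr, &obj_id);
	if (err)
		return err;

	/* ... SW writes encap contexts into the ICM region at 'addr' ... */

	return mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_SW_ENCAP, len,
				      0 /* uid */, &addr, obj_id);
}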

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 9482e51ac82a..7c5516b0a844 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -13,11 +13,13 @@  struct mlx5_dm {
 	unsigned long *steering_sw_icm_alloc_blocks;
 	unsigned long *header_modify_sw_icm_alloc_blocks;
 	unsigned long *header_modify_pattern_sw_icm_alloc_blocks;
+	unsigned long *header_encap_sw_icm_alloc_blocks;
 };
 
 struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
 {
 	u64 header_modify_pattern_icm_blocks = 0;
+	u64 header_sw_encap_icm_blocks = 0;
 	u64 header_modify_icm_blocks = 0;
 	u64 steering_icm_blocks = 0;
 	struct mlx5_dm *dm;
@@ -54,6 +56,17 @@  struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
 			goto err_modify_hdr;
 	}
 
+	if (MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size)) {
+		header_sw_encap_icm_blocks =
+			BIT(MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size) -
+			    MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+		dm->header_encap_sw_icm_alloc_blocks =
+			bitmap_zalloc(header_sw_encap_icm_blocks, GFP_KERNEL);
+		if (!dm->header_encap_sw_icm_alloc_blocks)
+			goto err_pattern;
+	}
+
 	support_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) &&
 		     MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2) &&
 		     MLX5_CAP64_DEV_MEM(dev, header_modify_pattern_sw_icm_start_address);
@@ -66,11 +79,14 @@  struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
 		dm->header_modify_pattern_sw_icm_alloc_blocks =
 			bitmap_zalloc(header_modify_pattern_icm_blocks, GFP_KERNEL);
 		if (!dm->header_modify_pattern_sw_icm_alloc_blocks)
-			goto err_pattern;
+			goto err_sw_encap;
 	}
 
 	return dm;
 
+err_sw_encap:
+	bitmap_free(dm->header_encap_sw_icm_alloc_blocks);
+
 err_pattern:
 	bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
 
@@ -105,6 +121,14 @@  void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
 		bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
 	}
 
+	if (dm->header_encap_sw_icm_alloc_blocks) {
+		WARN_ON(!bitmap_empty(dm->header_encap_sw_icm_alloc_blocks,
+				      BIT(MLX5_CAP_DEV_MEM(dev,
+							   log_indirect_encap_sw_icm_size) -
+				      MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
+		bitmap_free(dm->header_encap_sw_icm_alloc_blocks);
+	}
+
 	if (dm->header_modify_pattern_sw_icm_alloc_blocks) {
 		WARN_ON(!bitmap_empty(dm->header_modify_pattern_sw_icm_alloc_blocks,
 				      BIT(MLX5_CAP_DEV_MEM(dev,
@@ -164,6 +188,13 @@  int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
 						log_header_modify_pattern_sw_icm_size);
 		block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
 		break;
+	case MLX5_SW_ICM_TYPE_SW_ENCAP:
+		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+						    indirect_encap_sw_icm_start_address);
+		log_icm_size = MLX5_CAP_DEV_MEM(dev,
+						log_indirect_encap_sw_icm_size);
+		block_map = dm->header_encap_sw_icm_alloc_blocks;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -242,6 +273,11 @@  int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
 						    header_modify_pattern_sw_icm_start_address);
 		block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
 		break;
+	case MLX5_SW_ICM_TYPE_SW_ENCAP:
+		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+						    indirect_encap_sw_icm_start_address);
+		block_map = dm->header_encap_sw_icm_alloc_blocks;
+		break;
 	default:
 		return -EINVAL;
 	}
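
For reference, the bitmap sizing in mlx5_dm_create() above is simply
2^(log region size - log block size) bits, one bit per SW ICM block. A small
user-space sketch with assumed example values (a real device reports these
through its DEV_MEM capabilities):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed example values, not taken from real hardware:
	 * a 1 MiB SW encap ICM region carved into 4 KiB blocks.
	 */
	unsigned int log_indirect_encap_sw_icm_size = 20;
	unsigned int log_sw_icm_block_size = 12;
	uint64_t blocks = 1ULL << (log_indirect_encap_sw_icm_size -
				   log_sw_icm_block_size);

	/* header_encap_sw_icm_alloc_blocks then holds one bit per block */
	printf("bitmap tracks %llu blocks\n", (unsigned long long)blocks);
	return 0;
}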