
[net-next,06/15] net/mlx5e: XDP, Add support for multi-buffer XDP redirect-in

Message ID 20230417121903.46218-7-tariqt@nvidia.com
State Accepted
Delegated to: Netdev Maintainers
Series net/mlx5e: Extend XDP multi-buffer capabilities

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 18 this patch: 18
netdev/cc_maintainers warning 9 maintainers not CCed: leon@kernel.org dtatulea@nvidia.com linux-rdma@vger.kernel.org daniel@iogearbox.net john.fastabend@gmail.com bpf@vger.kernel.org maxtram95@gmail.com hawk@kernel.org ast@kernel.org
netdev/build_clang success Errors and warnings before: 18 this patch: 18
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 18 this patch: 18
netdev/checkpatch warning
    WARNING: line length of 100 exceeds 80 columns
    WARNING: line length of 87 exceeds 80 columns
    WARNING: line length of 89 exceeds 80 columns
    WARNING: line length of 93 exceeds 80 columns
    WARNING: line length of 96 exceeds 80 columns
    WARNING: line length of 97 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Tariq Toukan April 17, 2023, 12:18 p.m. UTC
Handle multi-buffer XDP redirect-in requests coming through
mlx5e_xdp_xmit.

Extend struct mlx5e_xmit_data_frags with an additional dma_arr field that
points to the fragments' DMA mappings, as they cannot be retrieved via the
page_pool_get_dma_addr() function.
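
Condensed from the patch below, the xmit path then resolves each
fragment's address as follows: redirected frames carry their own
mappings in dma_arr, while frames forwarded from the local RQ keep
resolving the address through the page_pool, as before.

	/* Per-fragment address selection in mlx5e_xmit_xdp_frame() */
	dma_addr_t addr = xdptxdf->dma_arr ?
		xdptxdf->dma_arr[i] :
		page_pool_get_dma_addr(skb_frag_page(frag)) + skb_frag_off(frag);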

Push a dma_addr xdpi instance for each fragment, and use them in the
completion flow to DMA-unmap the frags.
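
The per-frame xdpi FIFO layout then becomes (push order in
mlx5e_xdp_xmit(), mirrored by the pops in mlx5e_free_xdpsq_desc()):

	/* { .frame.xdpf = xdpf }                      the frame itself
	 * { .frame.dma_addr = xdptxd->dma_addr }      linear part mapping
	 * { .frame.dma_addr = dma_arr[0] }            fragment 0 mapping
	 *   ...
	 * { .frame.dma_addr = dma_arr[nr_frags - 1] } last fragment mapping
	 */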

Finally, remove the restriction in mlx5e_open_xdpsq, and set the flag in
xdp_features.
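
As an illustration only (not part of this patch), a frags-aware XDP
program that exercises the redirect-in path could look like the sketch
below. The "xdp.frags" section name makes libbpf load the program with
BPF_F_XDP_HAS_FRAGS; TARGET_IFINDEX is a placeholder for the egress
mlx5e netdev.

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	#define TARGET_IFINDEX 2 /* placeholder */

	/* Redirect every frame, including multi-buffer ones, to the
	 * target netdev; its driver then transmits them via ndo_xdp_xmit
	 * (mlx5e_xdp_xmit here).
	 */
	SEC("xdp.frags")
	int xdp_redirect_mb(struct xdp_md *ctx)
	{
		return bpf_redirect(TARGET_IFINDEX, 0);
	}

	char _license[] SEC("license") = "GPL";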

Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
 .../net/ethernet/mellanox/mlx5/core/en/txrx.h |  1 +
 .../net/ethernet/mellanox/mlx5/core/en/xdp.c  | 82 ++++++++++++++++---
 .../net/ethernet/mellanox/mlx5/core/en_main.c |  9 +-
 3 files changed, 75 insertions(+), 17 deletions(-)

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 1302f52db883..47381e949f1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -87,6 +87,7 @@  struct mlx5e_xmit_data {
 struct mlx5e_xmit_data_frags {
 	struct mlx5e_xmit_data xd;
 	struct skb_shared_info *sinfo;
+	dma_addr_t *dma_arr;
 };
 
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 5dab9012dc2a..c266d073e2f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -126,6 +126,7 @@  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 
 	if (xdptxd->has_frags) {
 		xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
+		xdptxdf.dma_arr = NULL;
 
 		for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
 			skb_frag_t *frag = &xdptxdf.sinfo->frags[i];
@@ -548,7 +549,8 @@  mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 			skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
 			dma_addr_t addr;
 
-			addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
+			addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+				page_pool_get_dma_addr(skb_frag_page(frag)) +
 				skb_frag_off(frag);
 
 			dseg++;
@@ -601,6 +603,21 @@  static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
 
 			dma_unmap_single(sq->pdev, dma_addr,
 					 xdpf->len, DMA_TO_DEVICE);
+			if (xdp_frame_has_frags(xdpf)) {
+				struct skb_shared_info *sinfo;
+				int j;
+
+				sinfo = xdp_get_shared_info_from_frame(xdpf);
+				for (j = 0; j < sinfo->nr_frags; j++) {
+					skb_frag_t *frag = &sinfo->frags[j];
+
+					xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+					dma_addr = xdpi.frame.dma_addr;
+
+					dma_unmap_single(sq->pdev, dma_addr,
+							 skb_frag_size(frag), DMA_TO_DEVICE);
+				}
+			}
 			xdp_return_frame_bulk(xdpf, bq);
 			break;
 		}
@@ -759,23 +776,57 @@  int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	sq = &priv->channels.c[sq_num]->xdpsq;
 
 	for (i = 0; i < n; i++) {
+		struct mlx5e_xmit_data_frags xdptxdf = {};
 		struct xdp_frame *xdpf = frames[i];
-		struct mlx5e_xmit_data xdptxd = {};
+		dma_addr_t dma_arr[MAX_SKB_FRAGS];
+		struct mlx5e_xmit_data *xdptxd;
 		bool ret;
 
-		xdptxd.data = xdpf->data;
-		xdptxd.len = xdpf->len;
-		xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
-						 xdptxd.len, DMA_TO_DEVICE);
+		xdptxd = &xdptxdf.xd;
+		xdptxd->data = xdpf->data;
+		xdptxd->len = xdpf->len;
+		xdptxd->has_frags = xdp_frame_has_frags(xdpf);
+		xdptxd->dma_addr = dma_map_single(sq->pdev, xdptxd->data,
+						  xdptxd->len, DMA_TO_DEVICE);
 
-		if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr)))
+		if (unlikely(dma_mapping_error(sq->pdev, xdptxd->dma_addr)))
 			break;
 
+		if (xdptxd->has_frags) {
+			int j;
+
+			xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
+			xdptxdf.dma_arr = dma_arr;
+			for (j = 0; j < xdptxdf.sinfo->nr_frags; j++) {
+				skb_frag_t *frag = &xdptxdf.sinfo->frags[j];
+
+				dma_arr[j] = dma_map_single(sq->pdev, skb_frag_address(frag),
+							    skb_frag_size(frag), DMA_TO_DEVICE);
+
+				if (!dma_mapping_error(sq->pdev, dma_arr[j]))
+					continue;
+				/* mapping error */
+				while (--j >= 0)
+					dma_unmap_single(sq->pdev, dma_arr[j],
+							 skb_frag_size(&xdptxdf.sinfo->frags[j]),
+							 DMA_TO_DEVICE);
+				goto out;
+			}
+		}
+
 		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-				      mlx5e_xmit_xdp_frame, sq, &xdptxd, 0);
+				      mlx5e_xmit_xdp_frame, sq, xdptxd, 0);
 		if (unlikely(!ret)) {
-			dma_unmap_single(sq->pdev, xdptxd.dma_addr,
-					 xdptxd.len, DMA_TO_DEVICE);
+			int j;
+
+			dma_unmap_single(sq->pdev, xdptxd->dma_addr,
+					 xdptxd->len, DMA_TO_DEVICE);
+			if (!xdptxd->has_frags)
+				break;
+			for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
+				dma_unmap_single(sq->pdev, dma_arr[j],
+						 skb_frag_size(&xdptxdf.sinfo->frags[j]),
+						 DMA_TO_DEVICE);
 			break;
 		}
 
@@ -785,10 +836,19 @@  int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
 				     (union mlx5e_xdp_info) { .frame.xdpf = xdpf });
 		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
-				     (union mlx5e_xdp_info) { .frame.dma_addr = xdptxd.dma_addr });
+				     (union mlx5e_xdp_info) { .frame.dma_addr = xdptxd->dma_addr });
+		if (xdptxd->has_frags) {
+			int j;
+
+			for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
+				mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+						     (union mlx5e_xdp_info)
+						     { .frame.dma_addr = dma_arr[j] });
+		}
 		nxmit++;
 	}
 
+out:
 	if (flags & XDP_XMIT_FLUSH) {
 		if (sq->mpwqe.wqe)
 			mlx5e_xdp_mpwqe_complete(sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 0b5aafaefe4c..ccf7bb136f50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1862,11 +1862,7 @@  int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
 	csp.min_inline_mode = sq->min_inline_mode;
 	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 
-	/* Don't enable multi buffer on XDP_REDIRECT SQ, as it's not yet
-	 * supported by upstream, and there is no defined trigger to allow
-	 * transmitting redirected multi-buffer frames.
-	 */
-	if (param->is_xdp_mb && !is_redirect)
+	if (param->is_xdp_mb)
 		set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
 
 	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
@@ -4068,7 +4064,8 @@  void mlx5e_set_xdp_feature(struct net_device *netdev)
 
 	val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
 	      NETDEV_XDP_ACT_XSK_ZEROCOPY |
-	      NETDEV_XDP_ACT_NDO_XMIT;
+	      NETDEV_XDP_ACT_NDO_XMIT |
+	      NETDEV_XDP_ACT_NDO_XMIT_SG;
 	if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
 		val |= NETDEV_XDP_ACT_RX_SG;
 	xdp_set_features_flag(netdev, val);
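
For completeness: once the driver advertises NETDEV_XDP_ACT_NDO_XMIT_SG,
userspace can check for it via the XDP features query. A minimal sketch
using libbpf's bpf_xdp_query() (assumes a libbpf recent enough to expose
feature_flags; ifindex is a placeholder):

	#include <stdio.h>
	#include <bpf/libbpf.h>
	#include <linux/netdev.h>

	int main(void)
	{
		LIBBPF_OPTS(bpf_xdp_query_opts, opts);
		int ifindex = 4; /* placeholder: the mlx5e netdev */

		if (bpf_xdp_query(ifindex, 0, &opts))
			return 1;

		printf("NDO_XMIT_SG %ssupported\n",
		       (opts.feature_flags & NETDEV_XDP_ACT_NDO_XMIT_SG) ?
		       "" : "not ");
		return 0;
	}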