
[net-next,05/15] net/mlx5e: XDP, Use multiple single-entry objects in xdpi_fifo

Message ID 20230417121903.46218-6-tariqt@nvidia.com (mailing list archive)
State Accepted
Delegated to: Netdev Maintainers
Series net/mlx5e: Extend XDP multi-buffer capabilities

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 18 this patch: 18
netdev/cc_maintainers warning 8 maintainers not CCed: leon@kernel.org linux-rdma@vger.kernel.org daniel@iogearbox.net john.fastabend@gmail.com bpf@vger.kernel.org maxtram95@gmail.com hawk@kernel.org ast@kernel.org
netdev/build_clang success Errors and warnings before: 18 this patch: 18
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 18 this patch: 18
netdev/checkpatch warning
    WARNING: line length of 82 exceeds 80 columns
    WARNING: line length of 83 exceeds 80 columns
    WARNING: line length of 84 exceeds 80 columns
    WARNING: line length of 87 exceeds 80 columns
    WARNING: line length of 90 exceeds 80 columns
    WARNING: line length of 91 exceeds 80 columns
    WARNING: line length of 92 exceeds 80 columns
    WARNING: line length of 95 exceeds 80 columns
    WARNING: line length of 99 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Tariq Toukan April 17, 2023, 12:18 p.m. UTC
Here we fix the current abuse of wi->num_pkts, which was overloaded to
indicate the number of xdpi entries in the xdpi_fifo rather than the
number of packets.

Instead, reduce mlx5e_xdp_info to the size of a single field, making it
a union of unions. Per packet, push as many instances as needed to carry
the information required at completion time.

The sequence of xdpi instances pushed per packet is well defined and is
derived from the xmit_mode.

Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en.h  |  2 +-
 .../net/ethernet/mellanox/mlx5/core/en/xdp.c  | 95 +++++++++++++------
 .../net/ethernet/mellanox/mlx5/core/en/xdp.h  | 38 +++++---
 .../ethernet/mellanox/mlx5/core/en/xsk/tx.c   |  8 +-
 .../net/ethernet/mellanox/mlx5/core/en_main.c |  8 +-
 5 files changed, 101 insertions(+), 50 deletions(-)
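
To make the per-mode entry sequences from the commit message concrete, here is
a minimal, self-contained sketch. The type and function names (xdp_info, push,
pop, the stub pointers) are simplified stand-ins for illustration, not the
driver's actual code; only the entry layout per xmit_mode follows the patch.

	/* Sketch of the fifo layout this patch introduces. */
	#include <stdint.h>
	#include <stdio.h>

	enum xmit_mode { XMIT_MODE_FRAME, XMIT_MODE_PAGE, XMIT_MODE_XSK };

	/* Mirrors union mlx5e_xdp_info: every member is a single field,
	 * so each fifo slot carries exactly one piece of information.
	 */
	union xdp_info {
		enum xmit_mode mode;
		union {
			void     *xdpf;      /* stand-in for struct xdp_frame * */
			uintptr_t dma_addr;  /* stand-in for dma_addr_t */
		} frame;
		union {
			uint8_t num;         /* number of page entries that follow */
			void   *page;        /* stand-in for struct page * */
		} page;
	};

	#define FIFO_SZ 16  /* power of two, so a mask can do the wrap */

	struct fifo {
		union xdp_info xi[FIFO_SZ];
		uint32_t pc, cc;  /* free-running producer/consumer counters */
	};

	static void push(struct fifo *f, union xdp_info xi)
	{
		f->xi[f->pc++ & (FIFO_SZ - 1)] = xi;
	}

	static union xdp_info pop(struct fifo *f)
	{
		return f->xi[f->cc++ & (FIFO_SZ - 1)];
	}

	int main(void)
	{
		struct fifo f = { .pc = 0, .cc = 0 };
		int frame_stub, page_stub;

		/* MODE_FRAME: mode, xdpf, dma_addr -- three entries per packet. */
		push(&f, (union xdp_info){ .mode = XMIT_MODE_FRAME });
		push(&f, (union xdp_info){ .frame.xdpf = &frame_stub });
		push(&f, (union xdp_info){ .frame.dma_addr = 0x1000 });

		/* MODE_PAGE: mode, num, then 'num' page entries. */
		push(&f, (union xdp_info){ .mode = XMIT_MODE_PAGE });
		push(&f, (union xdp_info){ .page.num = 1 });
		push(&f, (union xdp_info){ .page.page = &page_stub });

		/* Completion pops the mode entry first; the mode alone
		 * determines how many further entries belong to the packet.
		 */
		while (f.cc != f.pc) {
			union xdp_info xi = pop(&f);

			switch (xi.mode) {
			case XMIT_MODE_FRAME: {
				void *xdpf = pop(&f).frame.xdpf;
				uintptr_t dma = pop(&f).frame.dma_addr;

				printf("frame: xdpf=%p dma=%#lx\n",
				       xdpf, (unsigned long)dma);
				break;
			}
			case XMIT_MODE_PAGE: {
				uint8_t n = pop(&f).page.num;

				while (n--)
					printf("page: %p\n", pop(&f).page.page);
				break;
			}
			case XMIT_MODE_XSK:
				/* AF_XDP: the mode entry is the whole record. */
				printf("xsk frame\n");
				break;
			}
		}
		return 0;
	}

Note how the pop side must stay in lockstep with the push side; this is why
the new header comment in xdp.h spells out the exact sequence per mode.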

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 386f5a498e52..0e15afbe1673 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -476,7 +476,7 @@  struct mlx5e_txqsq {
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_xdp_info_fifo {
-	struct mlx5e_xdp_info *xi;
+	union mlx5e_xdp_info *xi;
 	u32 *cc;
 	u32 *pc;
 	u32 mask;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index dcae2d4e2c03..5dab9012dc2a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -63,7 +63,6 @@  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 	struct page *page = virt_to_page(xdp->data);
 	struct mlx5e_xmit_data_frags xdptxdf = {};
 	struct mlx5e_xmit_data *xdptxd;
-	struct mlx5e_xdp_info xdpi;
 	struct xdp_frame *xdpf;
 	dma_addr_t dma_addr;
 	int i;
@@ -90,8 +89,6 @@  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 		 */
 		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
 
-		xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
-
 		if (unlikely(xdptxd->has_frags))
 			return false;
 
@@ -103,14 +100,18 @@  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 		}
 
 		xdptxd->dma_addr = dma_addr;
-		xdpi.frame.xdpf     = xdpf;
-		xdpi.frame.dma_addr = dma_addr;
 
 		if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
 					      mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
 			return false;
 
-		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+		/* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
+		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+				     (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_FRAME });
+		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+				     (union mlx5e_xdp_info) { .frame.xdpf = xdpf });
+		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+				     (union mlx5e_xdp_info) { .frame.dma_addr = dma_addr });
 		return true;
 	}
 
@@ -120,9 +121,6 @@  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 	 * mode.
 	 */
 
-	xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
-	xdpi.page.rq = rq;
-
 	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
 	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd->len, DMA_BIDIRECTIONAL);
 
@@ -148,16 +146,28 @@  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 				      mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
 		return false;
 
-	xdpi.page.page = page;
-	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+	/* xmit_mode == MLX5E_XDP_XMIT_MODE_PAGE */
+	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+			     (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_PAGE });
 
 	if (xdptxd->has_frags) {
+		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+				     (union mlx5e_xdp_info)
+				     { .page.num = 1 + xdptxdf.sinfo->nr_frags });
+		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+				     (union mlx5e_xdp_info) { .page.page = page });
 		for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
 			skb_frag_t *frag = &xdptxdf.sinfo->frags[i];
 
-			xdpi.page.page = skb_frag_page(frag);
-			mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+			mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+					     (union mlx5e_xdp_info)
+					     { .page.page = skb_frag_page(frag) });
 		}
+	} else {
+		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+				     (union mlx5e_xdp_info) { .page.num = 1 });
+		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+				     (union mlx5e_xdp_info) { .page.page = page });
 	}
 
 	return true;
@@ -526,7 +536,6 @@  mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
 
 	if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
-		u8 num_pkts = 1 + num_frags;
 		int i;
 
 		memset(&cseg->trailer, 0, sizeof(cseg->trailer));
@@ -552,7 +561,7 @@  mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 
 		sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
 			.num_wqebbs = num_wqebbs,
-			.num_pkts = num_pkts,
+			.num_pkts = 1,
 		};
 
 		sq->pc += num_wqebbs;
@@ -577,20 +586,46 @@  static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
 	u16 i;
 
 	for (i = 0; i < wi->num_pkts; i++) {
-		struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+		union mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
 
 		switch (xdpi.mode) {
-		case MLX5E_XDP_XMIT_MODE_FRAME:
+		case MLX5E_XDP_XMIT_MODE_FRAME: {
 			/* XDP_TX from the XSK RQ and XDP_REDIRECT */
-			dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
-					 xdpi.frame.xdpf->len, DMA_TO_DEVICE);
-			xdp_return_frame_bulk(xdpi.frame.xdpf, bq);
+			struct xdp_frame *xdpf;
+			dma_addr_t dma_addr;
+
+			xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+			xdpf = xdpi.frame.xdpf;
+			xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+			dma_addr = xdpi.frame.dma_addr;
+
+			dma_unmap_single(sq->pdev, dma_addr,
+					 xdpf->len, DMA_TO_DEVICE);
+			xdp_return_frame_bulk(xdpf, bq);
 			break;
-		case MLX5E_XDP_XMIT_MODE_PAGE:
+		}
+		case MLX5E_XDP_XMIT_MODE_PAGE: {
 			/* XDP_TX from the regular RQ */
-			page_pool_put_defragged_page(xdpi.page.rq->page_pool,
-						     xdpi.page.page, -1, true);
+			u8 num, n = 0;
+
+			xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+			num = xdpi.page.num;
+
+			do {
+				struct page *page;
+
+				xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+				page = xdpi.page.page;
+
+				/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
+				 * as we know this is a page_pool page.
+				 */
+				page_pool_put_defragged_page(page->pp,
+							     page, -1, true);
+			} while (++n < num);
+
 			break;
+		}
 		case MLX5E_XDP_XMIT_MODE_XSK:
 			/* AF_XDP send */
 			(*xsk_frames)++;
@@ -726,7 +761,6 @@  int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
 		struct mlx5e_xmit_data xdptxd = {};
-		struct mlx5e_xdp_info xdpi;
 		bool ret;
 
 		xdptxd.data = xdpf->data;
@@ -737,10 +771,6 @@  int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr)))
 			break;
 
-		xdpi.mode           = MLX5E_XDP_XMIT_MODE_FRAME;
-		xdpi.frame.xdpf     = xdpf;
-		xdpi.frame.dma_addr = xdptxd.dma_addr;
-
 		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
 				      mlx5e_xmit_xdp_frame, sq, &xdptxd, 0);
 		if (unlikely(!ret)) {
@@ -748,7 +778,14 @@  int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 					 xdptxd.len, DMA_TO_DEVICE);
 			break;
 		}
-		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+
+		/* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
+		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+				     (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_FRAME });
+		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+				     (union mlx5e_xdp_info) { .frame.xdpf = xdpf });
+		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+				     (union mlx5e_xdp_info) { .frame.dma_addr = xdptxd.dma_addr });
 		nxmit++;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 8e97c68d11f4..9e8e6184f9e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -71,18 +71,30 @@  enum mlx5e_xdp_xmit_mode {
 	MLX5E_XDP_XMIT_MODE_XSK,
 };
 
-struct mlx5e_xdp_info {
+/* xmit_mode entry is pushed to the fifo per packet, followed by multiple
+ * entries, as follows:
+ *
+ * MLX5E_XDP_XMIT_MODE_FRAME:
+ *    xdpf, dma_addr_1, dma_addr_2, ... , dma_addr_num.
+ *    'num' is derived from xdpf.
+ *
+ * MLX5E_XDP_XMIT_MODE_PAGE:
+ *    num, page_1, page_2, ... , page_num.
+ *
+ * MLX5E_XDP_XMIT_MODE_XSK:
+ *    none.
+ */
+union mlx5e_xdp_info {
 	enum mlx5e_xdp_xmit_mode mode;
 	union {
-		struct {
-			struct xdp_frame *xdpf;
-			dma_addr_t dma_addr;
-		} frame;
-		struct {
-			struct mlx5e_rq *rq;
-			struct page *page;
-		} page;
-	};
+		struct xdp_frame *xdpf;
+		dma_addr_t dma_addr;
+	} frame;
+	union {
+		struct mlx5e_rq *rq;
+		u8 num;
+		struct page *page;
+	} page;
 };
 
 struct mlx5e_xsk_param;
@@ -212,14 +224,14 @@  mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
 
 static inline void
 mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
-		     struct mlx5e_xdp_info *xi)
+		     union mlx5e_xdp_info xi)
 {
 	u32 i = (*fifo->pc)++ & fifo->mask;
 
-	fifo->xi[i] = *xi;
+	fifo->xi[i] = xi;
 }
 
-static inline struct mlx5e_xdp_info
+static inline union mlx5e_xdp_info
 mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
 {
 	return fifo->xi[(*fifo->cc)++ & fifo->mask];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index b370a4daddfd..597f319d4770 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -44,7 +44,7 @@  int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
  * same.
  */
 static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
-				  struct mlx5e_xdp_info *xdpi)
+				  union mlx5e_xdp_info *xdpi)
 {
 	u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
 	struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];
@@ -54,14 +54,14 @@  static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
 	wi->num_pkts = 1;
 
 	nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
-	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
+	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, *xdpi);
 	sq->doorbell_cseg = &nopwqe->ctrl;
 }
 
 bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
 {
 	struct xsk_buff_pool *pool = sq->xsk_pool;
-	struct mlx5e_xdp_info xdpi;
+	union mlx5e_xdp_info xdpi;
 	bool work_done = true;
 	bool flush = false;
 
@@ -105,7 +105,7 @@  bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
 
 			mlx5e_xsk_tx_post_err(sq, &xdpi);
 		} else {
-			mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+			mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
 		}
 
 		flush = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ec72743b64e2..0b5aafaefe4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1300,17 +1300,19 @@  static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
 {
 	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
 	int wq_sz        = mlx5_wq_cyc_get_size(&sq->wq);
-	int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+	int entries = wq_sz * MLX5_SEND_WQEBB_NUM_DS * 2; /* upper bound for maximum num of
+							   * entries of all xmit_modes.
+							   */
 	size_t size;
 
-	size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq);
+	size = array_size(sizeof(*xdpi_fifo->xi), entries);
 	xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
 	if (!xdpi_fifo->xi)
 		return -ENOMEM;
 
 	xdpi_fifo->pc   = &sq->xdpi_fifo_pc;
 	xdpi_fifo->cc   = &sq->xdpi_fifo_cc;
-	xdpi_fifo->mask = dsegs_per_wq - 1;
+	xdpi_fifo->mask = entries - 1;
 
 	return 0;
 }
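
On the en_main.c hunk above: the mask-only indexing (pc and cc are
free-running u32 counters) requires the entry count to be a power of two, a
property the old dsegs_per_wq sizing already relied on. wq_sz and
MLX5_SEND_WQEBB_NUM_DS are powers of two, so doubling preserves it. A quick
standalone check, with illustrative values rather than ones taken from the
driver:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* Illustrative values: ring size and DS-per-WQEBB count are
		 * powers of two, so doubling keeps 'entries' a power of two.
		 */
		uint32_t wq_sz = 1024, ds_per_wqebb = 4;
		uint32_t entries = wq_sz * ds_per_wqebb * 2;
		uint32_t mask = entries - 1;

		/* Power of two: exactly one bit set. */
		assert((entries & (entries - 1)) == 0);

		/* Free-running counters keep indexing correctly across the
		 * 32-bit wrap, because 'entries' divides 2^32.
		 */
		uint32_t pc = UINT32_MAX; /* producer about to wrap */
		assert(((pc + 1) & mask) == (((pc & mask) + 1) % entries));

		return 0;
	}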