
[net-next,03/15] net/mlx5e: Introduce extended version for mlx5e_xmit_data

Message ID 20230417121903.46218-4-tariqt@nvidia.com (mailing list archive)
State Accepted
Delegated to: Netdev Maintainers
Series net/mlx5e: Extend XDP multi-buffer capabilities

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 18 this patch: 18
netdev/cc_maintainers warning 9 maintainers not CCed: leon@kernel.org dtatulea@nvidia.com linux-rdma@vger.kernel.org daniel@iogearbox.net john.fastabend@gmail.com bpf@vger.kernel.org maxtram95@gmail.com hawk@kernel.org ast@kernel.org
netdev/build_clang success Errors and warnings before: 18 this patch: 18
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 18 this patch: 18
netdev/checkpatch warning WARNING: line length of 84 exceeds 80 columns; WARNING: line length of 87 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Tariq Toukan April 17, 2023, 12:18 p.m. UTC
Introduce struct mlx5e_xmit_data_frags to be used for non-linear xmit
buffers. Let it include a sinfo pointer.

Take one bit from the len field to indicate whether the descriptor has
fragments and can be upcast to the extended version.
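
For illustration, the layout and upcast pattern look roughly like this (a
minimal sketch mirroring the structs and the container_of() use in the diff
below; not part of the patch itself):

	struct mlx5e_xmit_data {
		dma_addr_t  dma_addr;
		void       *data;
		u32         len : 31;      /* one bit borrowed from len */
		u32         has_frags : 1; /* descriptor is the extended version */
	};

	struct mlx5e_xmit_data_frags {
		struct mlx5e_xmit_data xd;
		struct skb_shared_info *sinfo;
	};

	/* Receivers may compute the container unconditionally, but must only
	 * dereference sinfo once has_frags confirms the container exists:
	 */
	struct mlx5e_xmit_data_frags *xdptxdf =
		container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
	u32 num_frags = 0;

	if (xdptxd->has_frags)
		num_frags = xdptxdf->sinfo->nr_frags;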

Zero-initialize the descriptor to make sure has_frags, and potentially
future fields, are zero when not explicitly assigned.
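
Concretely, stack descriptors now use an empty initializer so the new bit
(and any future field) starts out clear, as the hunks below do:

	struct mlx5e_xmit_data_frags xdptxdf = {};	/* xd.has_frags == 0 */
	struct mlx5e_xmit_data xdptxd = {};		/* has_frags == 0 */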

Another field will be added in a downstream patch to indicate and point
to the DMA addresses of the different frags, for redirect-in requests.

This simplifies the parameters of the mlx5e_xmit_xdp_frame /
mlx5e_xmit_xdp_frame_mpwqe functions.
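
Roughly, at the call sites (taken from the xdp.c hunks below):

	/* before: fragment info passed out of band */
	INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
			mlx5e_xmit_xdp_frame, sq, &xdptxd, sinfo, 0);

	/* after: it travels inside the descriptor */
	INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
			mlx5e_xmit_xdp_frame, sq, xdptxd, 0);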

Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en.h  |  1 -
 .../net/ethernet/mellanox/mlx5/core/en/txrx.h |  8 ++-
 .../net/ethernet/mellanox/mlx5/core/en/xdp.c  | 63 ++++++++++---------
 .../net/ethernet/mellanox/mlx5/core/en/xdp.h  |  2 -
 .../ethernet/mellanox/mlx5/core/en/xsk/tx.c   |  4 +-
 5 files changed, 44 insertions(+), 34 deletions(-)

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 479979318c50..386f5a498e52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -487,7 +487,6 @@  struct mlx5e_xmit_data;
 typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
 typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
 					struct mlx5e_xmit_data *,
-					struct skb_shared_info *,
 					int);
 
 struct mlx5e_xdpsq {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 6f7ebedda279..1302f52db883 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -80,7 +80,13 @@  static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
 struct mlx5e_xmit_data {
 	dma_addr_t  dma_addr;
 	void       *data;
-	u32         len;
+	u32         len : 31;
+	u32         has_frags : 1;
+};
+
+struct mlx5e_xmit_data_frags {
+	struct mlx5e_xmit_data xd;
+	struct skb_shared_info *sinfo;
 };
 
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index c8b532cea7d1..3e7ebf0f0f01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -61,8 +61,8 @@  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 		    struct xdp_buff *xdp)
 {
 	struct page *page = virt_to_page(xdp->data);
-	struct skb_shared_info *sinfo = NULL;
-	struct mlx5e_xmit_data xdptxd;
+	struct mlx5e_xmit_data_frags xdptxdf = {};
+	struct mlx5e_xmit_data *xdptxd;
 	struct mlx5e_xdp_info xdpi;
 	struct xdp_frame *xdpf;
 	dma_addr_t dma_addr;
@@ -72,8 +72,10 @@  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 	if (unlikely(!xdpf))
 		return false;
 
-	xdptxd.data = xdpf->data;
-	xdptxd.len  = xdpf->len;
+	xdptxd = &xdptxdf.xd;
+	xdptxd->data = xdpf->data;
+	xdptxd->len  = xdpf->len;
+	xdptxd->has_frags = xdp_frame_has_frags(xdpf);
 
 	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
 		/* The xdp_buff was in the UMEM and was copied into a newly
@@ -90,19 +92,22 @@  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 
 		xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
 
-		dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,
+		if (unlikely(xdptxd->has_frags))
+			return false;
+
+		dma_addr = dma_map_single(sq->pdev, xdptxd->data, xdptxd->len,
 					  DMA_TO_DEVICE);
 		if (dma_mapping_error(sq->pdev, dma_addr)) {
 			xdp_return_frame(xdpf);
 			return false;
 		}
 
-		xdptxd.dma_addr     = dma_addr;
+		xdptxd->dma_addr = dma_addr;
 		xdpi.frame.xdpf     = xdpf;
 		xdpi.frame.dma_addr = dma_addr;
 
 		if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-					      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
+					      mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
 			return false;
 
 		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
@@ -119,13 +124,13 @@  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 	xdpi.page.rq = rq;
 
 	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
-	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_BIDIRECTIONAL);
+	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd->len, DMA_BIDIRECTIONAL);
 
-	if (unlikely(xdp_frame_has_frags(xdpf))) {
-		sinfo = xdp_get_shared_info_from_frame(xdpf);
+	if (unlikely(xdptxd->has_frags)) {
+		xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
 
-		for (i = 0; i < sinfo->nr_frags; i++) {
-			skb_frag_t *frag = &sinfo->frags[i];
+		for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
+			skb_frag_t *frag = &xdptxdf.sinfo->frags[i];
 			dma_addr_t addr;
 			u32 len;
 
@@ -137,18 +142,18 @@  mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 		}
 	}
 
-	xdptxd.dma_addr = dma_addr;
+	xdptxd->dma_addr = dma_addr;
 
 	if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-				      mlx5e_xmit_xdp_frame, sq, &xdptxd, sinfo, 0)))
+				      mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
 		return false;
 
 	xdpi.page.page = page;
 	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
 
-	if (unlikely(xdp_frame_has_frags(xdpf))) {
-		for (i = 0; i < sinfo->nr_frags; i++) {
-			skb_frag_t *frag = &sinfo->frags[i];
+	if (unlikely(xdptxd->has_frags)) {
+		for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
+			skb_frag_t *frag = &xdptxdf.sinfo->frags[i];
 
 			xdpi.page.page = skb_frag_page(frag);
 			mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
@@ -381,23 +386,23 @@  INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq
 
 INDIRECT_CALLABLE_SCOPE bool
 mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
-		     struct skb_shared_info *sinfo, int check_result);
+		     int check_result);
 
 INDIRECT_CALLABLE_SCOPE bool
 mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
-			   struct skb_shared_info *sinfo, int check_result)
+			   int check_result)
 {
 	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
 	struct mlx5e_xdpsq_stats *stats = sq->stats;
 
-	if (unlikely(sinfo)) {
+	if (unlikely(xdptxd->has_frags)) {
 		/* MPWQE is enabled, but a multi-buffer packet is queued for
 		 * transmission. MPWQE can't send fragmented packets, so close
 		 * the current session and fall back to a regular WQE.
 		 */
 		if (unlikely(sq->mpwqe.wqe))
 			mlx5e_xdp_mpwqe_complete(sq);
-		return mlx5e_xmit_xdp_frame(sq, xdptxd, sinfo, 0);
+		return mlx5e_xmit_xdp_frame(sq, xdptxd, 0);
 	}
 
 	if (unlikely(xdptxd->len > sq->hw_mtu)) {
@@ -446,8 +451,10 @@  INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
 
 INDIRECT_CALLABLE_SCOPE bool
 mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
-		     struct skb_shared_info *sinfo, int check_result)
+		     int check_result)
 {
+	struct mlx5e_xmit_data_frags *xdptxdf =
+		container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
 	struct mlx5_wq_cyc       *wq   = &sq->wq;
 	struct mlx5_wqe_ctrl_seg *cseg;
 	struct mlx5_wqe_data_seg *dseg;
@@ -476,9 +483,9 @@  mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 	if (!check_result) {
 		int stop_room = 1;
 
-		if (unlikely(sinfo)) {
-			ds_cnt += sinfo->nr_frags;
-			num_frags = sinfo->nr_frags;
+		if (unlikely(xdptxd->has_frags)) {
+			ds_cnt += xdptxdf->sinfo->nr_frags;
+			num_frags = xdptxdf->sinfo->nr_frags;
 			num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
 			/* Assuming MLX5_CAP_GEN(mdev, max_wqe_sz_sq) is big
 			 * enough to hold all fragments.
@@ -529,7 +536,7 @@  mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 		dseg->lkey = sq->mkey_be;
 
 		for (i = 0; i < num_frags; i++) {
-			skb_frag_t *frag = &sinfo->frags[i];
+			skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
 			dma_addr_t addr;
 
 			addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
@@ -718,7 +725,7 @@  int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
-		struct mlx5e_xmit_data xdptxd;
+		struct mlx5e_xmit_data xdptxd = {};
 		struct mlx5e_xdp_info xdpi;
 		bool ret;
 
@@ -735,7 +742,7 @@  int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		xdpi.frame.dma_addr = xdptxd.dma_addr;
 
 		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-				      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0);
+				      mlx5e_xmit_xdp_frame, sq, &xdptxd, 0);
 		if (unlikely(!ret)) {
 			dma_unmap_single(sq->pdev, xdptxd.dma_addr,
 					 xdptxd.len, DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 8208692035f8..8e97c68d11f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -101,11 +101,9 @@  extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops;
 
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
 							  struct mlx5e_xmit_data *xdptxd,
-							  struct skb_shared_info *sinfo,
 							  int check_result));
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
 						    struct mlx5e_xmit_data *xdptxd,
-						    struct skb_shared_info *sinfo,
 						    int check_result));
 INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
 INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 367a9505ca4f..b370a4daddfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -61,7 +61,6 @@  static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
 bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
 {
 	struct xsk_buff_pool *pool = sq->xsk_pool;
-	struct mlx5e_xmit_data xdptxd;
 	struct mlx5e_xdp_info xdpi;
 	bool work_done = true;
 	bool flush = false;
@@ -73,6 +72,7 @@  bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
 						   mlx5e_xmit_xdp_frame_check_mpwqe,
 						   mlx5e_xmit_xdp_frame_check,
 						   sq);
+		struct mlx5e_xmit_data xdptxd = {};
 		struct xdp_desc desc;
 		bool ret;
 
@@ -97,7 +97,7 @@  bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
 		xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
 
 		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-				      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL,
+				      mlx5e_xmit_xdp_frame, sq, &xdptxd,
 				      check_result);
 		if (unlikely(!ret)) {
 			if (sq->mpwqe.wqe)