
[19/20] bnxt: use data pool

Message ID: 20231107214045.2172393-20-dw@davidwei.uk (mailing list archive)
State: Not Applicable
Delegated to: Netdev Maintainers
Series: Zero copy Rx using io_uring

Checks

Context: netdev/tree_selection
Check: success
Description: Guessing tree name failed - patch did not apply, async

Commit Message

David Wei Nov. 7, 2023, 9:40 p.m. UTC
The bnxt driver is modified to use the data pool to support ZC Rx. A setup
function, bnxt_zc_rx(), is added; it is called for the XDP_SETUP_ZC_RX XDP
command and initialises a data_pool in the corresponding netdev_rx_queue.

Co-developed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David Wei <dw@davidwei.uk>
---
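
Note for reviewers: below is a minimal sketch of the data pool interface
this patch programs against, reconstructed from its use sites in the diff.
The struct fields and helper names match this patch; the io_uring-side
calls (io_zc_rx_*) are assumptions about earlier patches in this series,
not their actual internals.

	/* <net/data_pool.h>, sketch only */
	struct data_pool {
		struct page_pool	*page_pool;	/* normal kernel memory path */
		struct io_zc_rx_ifq	*zc_ifq;	/* io_uring ZC interface queue */
		struct ubuf_info	*zc_uarg;	/* marks skbs as zerocopy */
	};

	/* Buffer allocation dispatches on whether the queue is in ZC mode. */
	static inline struct page *data_pool_alloc_page(struct data_pool *dp)
	{
		if (dp->zc_ifq)
			return io_zc_rx_alloc_page(dp->zc_ifq);	/* assumed helper */
		return page_pool_dev_alloc_pages(dp->page_pool);
	}

	static inline void data_pool_put_page(struct data_pool *dp,
					      struct page *page)
	{
		if (dp->zc_ifq)
			io_zc_rx_put_page(dp->zc_ifq, page);	/* assumed helper */
		else
			page_pool_recycle_direct(dp->page_pool, page);
	}

	static inline dma_addr_t data_pool_get_dma_addr(struct data_pool *dp,
							struct page *page)
	{
		/* Assumes both providers pre-map pages and store the DMA
		 * address where page_pool_get_dma_addr() can find it. */
		return page_pool_get_dma_addr(page);
	}
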
 drivers/net/ethernet/broadcom/bnxt/bnxt.c     | 61 ++++++++++++++++---
 drivers/net/ethernet/broadcom/bnxt/bnxt.h     |  5 ++
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c |  3 +
 include/net/netdev_rx_queue.h                 |  2 +
 4 files changed, 61 insertions(+), 10 deletions(-)
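
The setup path added in bnxt_zc_rx() below takes the NIC down, installs the
ifq/uarg pair on the chosen netdev_rx_queue, brings the NIC back up, and
binds the queue's NAPI id to the ifq. Here is a hedged sketch of how the
core might drive it through the new XDP command; the netdev_bpf zc_rx
fields match their use in this patch, but the wrapper itself is
hypothetical:

	static int netdev_setup_zc_rx(struct net_device *dev, u32 queue_id,
				      struct io_zc_rx_ifq *ifq,
				      struct ubuf_info *uarg)
	{
		struct netdev_bpf xdp = {
			.command	= XDP_SETUP_ZC_RX,
			.zc_rx.queue_id	= queue_id,
			.zc_rx.ifq	= ifq,	/* NULL tears ZC mode down */
			.zc_rx.uarg	= uarg,
		};

		if (!dev->netdev_ops->ndo_bpf)
			return -EOPNOTSUPP;
		return dev->netdev_ops->ndo_bpf(dev, &xdp);
	}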

Patch

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ca1088f7107e..2787c1b474db 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -55,6 +55,8 @@ 
 #include <net/page_pool/helpers.h>
 #include <linux/align.h>
 #include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <net/data_pool.h>
 
 #include "bnxt_hsi.h"
 #include "bnxt.h"
@@ -798,13 +800,7 @@  static struct page *__bnxt_alloc_rx_64k_page(struct bnxt *bp, dma_addr_t *mappin
 	if (!page)
 		return NULL;
 
-	*mapping = dma_map_page_attrs(&bp->pdev->dev, page, offset,
-				      BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
-				      DMA_ATTR_WEAK_ORDERING);
-	if (dma_mapping_error(&bp->pdev->dev, *mapping)) {
-		page_pool_recycle_direct(rxr->page_pool, page);
-		return NULL;
-	}
+	*mapping = page_pool_get_dma_addr(page);
 
 	if (page_offset)
 		*page_offset = offset;
@@ -824,13 +820,13 @@  static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
 						BNXT_RX_PAGE_SIZE);
 	} else {
-		page = page_pool_dev_alloc_pages(rxr->page_pool);
+		page = data_pool_alloc_page(&rxr->rx_dp);
 		*offset = 0;
 	}
 	if (!page)
 		return NULL;
 
-	*mapping = page_pool_get_dma_addr(page) + *offset;
+	*mapping = data_pool_get_dma_addr(&rxr->rx_dp, page) + *offset;
 	return page;
 }
 
@@ -1816,6 +1812,8 @@  static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
 		return;
 	}
 	skb_record_rx_queue(skb, bnapi->index);
+	if (bnapi->rx_ring->rx_dp.zc_uarg)
+		skb_zcopy_init(skb, bnapi->rx_ring->rx_dp.zc_uarg);
 	skb_mark_for_recycle(skb);
 	napi_gro_receive(&bnapi->napi, skb);
 }
@@ -3100,7 +3098,7 @@  static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		rx_agg_buf->page = NULL;
 		__clear_bit(i, rxr->rx_agg_bmap);
 
-		page_pool_recycle_direct(rxr->page_pool, page);
+		data_pool_put_page(&rxr->rx_dp, page);
 	}
 
 skip_rx_agg_free:
@@ -3305,6 +3303,8 @@  static void bnxt_free_rx_rings(struct bnxt *bp)
 
 		page_pool_destroy(rxr->page_pool);
 		rxr->page_pool = NULL;
+		rxr->rx_dp.page_pool = NULL;
+		rxr->rx_dp.zc_ifq = NULL;
 
 		kfree(rxr->rx_agg_bmap);
 		rxr->rx_agg_bmap = NULL;
@@ -3333,6 +3333,8 @@  static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
 		pp.flags |= PP_FLAG_PAGE_FRAG;
+	pp.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+	pp.max_len = PAGE_SIZE;
 
 	rxr->page_pool = page_pool_create(&pp);
 	if (IS_ERR(rxr->page_pool)) {
@@ -3341,6 +3343,7 @@  static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 		rxr->page_pool = NULL;
 		return err;
 	}
+	rxr->rx_dp.page_pool = rxr->page_pool;
 	return 0;
 }
 
@@ -3803,6 +3806,7 @@  static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
 {
 	struct bnxt_rx_ring_info *rxr;
 	struct bnxt_ring_struct *ring;
+	struct netdev_rx_queue *rxq;
 	u32 type;
 
 	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
@@ -3831,6 +3835,12 @@  static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
 		bnxt_init_rxbd_pages(ring, type);
 	}
 
+	rxq = __netif_get_rx_queue(bp->dev, ring_nr);
+	if (rxq->data_pool.zc_ifq) {
+		rxr->rx_dp.zc_ifq = rxq->data_pool.zc_ifq;
+		rxr->rx_dp.zc_uarg = rxq->data_pool.zc_uarg;
+	}
+
 	return bnxt_alloc_one_rx_ring(bp, ring_nr);
 }
 
@@ -13974,6 +13984,37 @@  void bnxt_print_device_info(struct bnxt *bp)
 	pcie_print_link_status(bp->pdev);
 }
 
+int bnxt_zc_rx(struct bnxt *bp, struct netdev_bpf *xdp)
+{
+	unsigned int ifq_idx = xdp->zc_rx.queue_id;
+
+	if (ifq_idx >= bp->rx_nr_rings)
+		return -EINVAL;
+
+	bnxt_rtnl_lock_sp(bp);
+	if (netif_running(bp->dev)) {
+		struct netdev_rx_queue *rxq;
+		int rc, napi_id;
+
+		bnxt_ulp_stop(bp);
+		bnxt_close_nic(bp, true, false);
+
+		rxq = __netif_get_rx_queue(bp->dev, ifq_idx);
+		rxq->data_pool.zc_ifq = xdp->zc_rx.ifq;
+		rxq->data_pool.zc_uarg = xdp->zc_rx.uarg;
+
+		rc = bnxt_open_nic(bp, true, false);
+		bnxt_ulp_start(bp, rc);
+
+		if (xdp->zc_rx.ifq) {
+			napi_id = bp->bnapi[ifq_idx]->napi.napi_id;
+			io_zc_rx_set_napi(xdp->zc_rx.ifq, napi_id);
+		}
+	}
+	bnxt_rtnl_unlock_sp(bp);
+	return 0;
+}
+
 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct net_device *dev;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index d95d0ca91f3f..7f3b03fa5960 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -33,6 +33,7 @@ 
 #ifdef CONFIG_TEE_BNXT_FW
 #include <linux/firmware/broadcom/tee_bnxt_fw.h>
 #endif
+#include <net/data_pool.h>
 
 extern struct list_head bnxt_block_cb_list;
 
@@ -946,6 +947,7 @@  struct bnxt_rx_ring_info {
 	struct bnxt_ring_struct	rx_agg_ring_struct;
 	struct xdp_rxq_info	xdp_rxq;
 	struct page_pool	*page_pool;
+	struct data_pool	rx_dp;
 };
 
 struct bnxt_rx_sw_stats {
@@ -2485,4 +2487,7 @@  int bnxt_get_port_parent_id(struct net_device *dev,
 void bnxt_dim_work(struct work_struct *work);
 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
 void bnxt_print_device_info(struct bnxt *bp);
+
+int bnxt_zc_rx(struct bnxt *bp, struct netdev_bpf *xdp);
+
 #endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 96f5ca778c67..b7ef2e551334 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -465,6 +465,9 @@  int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	case XDP_SETUP_PROG:
 		rc = bnxt_xdp_set(bp, xdp->prog);
 		break;
+	case XDP_SETUP_ZC_RX:
+		rc = bnxt_zc_rx(bp, xdp);
+		break;
 	default:
 		rc = -EINVAL;
 		break;
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index cdcafb30d437..1b2944e61e19 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -6,6 +6,7 @@ 
 #include <linux/netdevice.h>
 #include <linux/sysfs.h>
 #include <net/xdp.h>
+#include <net/data_pool.h>
 
 /* This structure contains an instance of an RX queue. */
 struct netdev_rx_queue {
@@ -18,6 +19,7 @@  struct netdev_rx_queue {
 	struct net_device		*dev;
 	netdevice_tracker		dev_tracker;
 
+	struct data_pool		data_pool;
 #ifdef CONFIG_XDP_SOCKETS
 	struct xsk_buff_pool            *pool;
 #endif
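
Teardown reuses the same hook: calling bnxt_zc_rx() with xdp->zc_rx.ifq ==
NULL restarts the ring with rx_dp.zc_ifq cleared (see bnxt_free_rx_rings()
and bnxt_init_one_rx_ring() above), so allocation falls back to the page
pool. With the hypothetical wrapper from the notes:

	/* return queue 0 to normal page pool operation */
	err = netdev_setup_zc_rx(dev, 0, NULL, NULL);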