| Message ID | 170130410439.5198.5369308046781025813.stgit@anambiarhost.jf.intel.com |
|---|---|
| State | Superseded |
| Delegated to | Netdev Maintainers |
| Series | Introduce queue and NAPI support in netdev-genl (Was: Introduce NAPI queues support) |
On Wed, Nov 29, 2023 at 4:11 PM Amritha Nambiar <amritha.nambiar@intel.com> wrote:
>
> From: Jakub Kicinski <kuba@kernel.org>
>
> Make bnxt compatible with the newly added netlink queue GET APIs.
>
> Signed-off-by: Jakub Kicinski <kuba@kernel.org>
> Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>
> ---
>  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 12 ++++++++++++
>  1 file changed, 12 insertions(+)
>
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> index e35e7e02538c..08793e24e0ee 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> @@ -3845,6 +3845,9 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
>         ring = &rxr->rx_ring_struct;
>         bnxt_init_rxbd_pages(ring, type);
>
> +       netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
> +                            &rxr->bnapi->napi);
> +
>         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
>                 bpf_prog_add(bp->xdp_prog, 1);
>                 rxr->xdp_prog = bp->xdp_prog;
> @@ -3921,6 +3924,9 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
>                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
>
>                 ring->fw_ring_id = INVALID_HW_RING_ID;
> +
> +               netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX,
> +                                    &txr->bnapi->napi);

This will include the XDP TX rings that are internal to the driver. I
think we need to exclude these XDP rings and do something like this:

	if (i > bp->tx_nr_rings_xdp)
		netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
				     NETDEV_QUEUE_TYPE_TX, &txr->bnapi->napi);

>         }
>
>         return 0;
> @@ -9754,6 +9760,7 @@ static int bnxt_request_irq(struct bnxt *bp)
>                 if (rc)
>                         break;
>
> +               netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
>                 irq->requested = 1;
>
>                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
> @@ -9781,6 +9788,11 @@ static void bnxt_del_napi(struct bnxt *bp)
>         if (!bp->bnapi)
>                 return;
>
> +       for (i = 0; i < bp->rx_nr_rings; i++)
> +               netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
> +       for (i = 0; i < bp->tx_nr_rings; i++)

Similarly,

	for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)

> +               netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
> +
>         for (i = 0; i < bp->cp_nr_rings; i++) {
>                 struct bnxt_napi *bnapi = bp->bnapi[i];
>
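For reference, below is a minimal sketch of what the bnxt_init_tx_rings() loop might look like with the suggested exclusion folded in. It assumes the XDP TX rings occupy the lowest ring indices (0 through bp->tx_nr_rings_xdp - 1), which is why the sketch uses `>=` rather than the `>` in the quoted suggestion; the txr lookup line is reconstructed rather than taken from the quoted hunk, and the exact bound would need to be verified against the driver.

```c
/* Sketch only, not the posted patch: bnxt_init_tx_rings() loop with the
 * suggested XDP-ring exclusion. Assumes XDP TX rings sit at ring indices
 * 0..bp->tx_nr_rings_xdp - 1, so stack-visible TX queue N corresponds to
 * ring N + bp->tx_nr_rings_xdp.
 */
for (i = 0; i < bp->tx_nr_rings; i++) {
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
	struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

	ring->fw_ring_id = INVALID_HW_RING_ID;

	/* XDP TX rings are internal to the driver and have no netdev TX
	 * queue, so only link NAPI for the stack-visible rings.
	 */
	if (i >= bp->tx_nr_rings_xdp)
		netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
				     NETDEV_QUEUE_TYPE_TX,
				     &txr->bnapi->napi);
}
```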
On 11/29/2023 9:52 PM, Michael Chan wrote:
> On Wed, Nov 29, 2023 at 4:11 PM Amritha Nambiar
> <amritha.nambiar@intel.com> wrote:
>>
>> From: Jakub Kicinski <kuba@kernel.org>
>>
>> Make bnxt compatible with the newly added netlink queue GET APIs.
>>
>> Signed-off-by: Jakub Kicinski <kuba@kernel.org>
>> Signed-off-by: Amritha Nambiar <amritha.nambiar@intel.com>
>> ---
>>  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 12 ++++++++++++
>>  1 file changed, 12 insertions(+)
>>
>> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
>> index e35e7e02538c..08793e24e0ee 100644
>> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
>> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
>> @@ -3845,6 +3845,9 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
>>         ring = &rxr->rx_ring_struct;
>>         bnxt_init_rxbd_pages(ring, type);
>>
>> +       netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
>> +                            &rxr->bnapi->napi);
>> +
>>         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
>>                 bpf_prog_add(bp->xdp_prog, 1);
>>                 rxr->xdp_prog = bp->xdp_prog;
>> @@ -3921,6 +3924,9 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
>>                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
>>
>>                 ring->fw_ring_id = INVALID_HW_RING_ID;
>> +
>> +               netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX,
>> +                                    &txr->bnapi->napi);
>
> This will include the XDP TX rings that are internal to the driver. I
> think we need to exclude these XDP rings and do something like this:
>
> if (i > bp->tx_nr_rings_xdp)
>          netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
> NETDEV_QUEUE_TYPE_TX, &txr->bnapi->napi);
>

Okay, will wait for Jakub's response as well. I can make this change in
the next version (after waiting for other comments on the rest of the
series), but I may not be able to test this on bnxt.

>>         }
>>
>>         return 0;
>> @@ -9754,6 +9760,7 @@ static int bnxt_request_irq(struct bnxt *bp)
>>                 if (rc)
>>                         break;
>>
>> +               netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
>>                 irq->requested = 1;
>>
>>                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
>> @@ -9781,6 +9788,11 @@ static void bnxt_del_napi(struct bnxt *bp)
>>         if (!bp->bnapi)
>>                 return;
>>
>> +       for (i = 0; i < bp->rx_nr_rings; i++)
>> +               netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
>> +       for (i = 0; i < bp->tx_nr_rings; i++)
>
> Similarly,
>
> for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
>
>> +               netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
>> +
>>         for (i = 0; i < bp->cp_nr_rings; i++) {
>>                 struct bnxt_napi *bnapi = bp->bnapi[i];
>>
>>
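The matching teardown in bnxt_del_napi() would then mirror the init side. This is only a sketch combining the posted hunk with the "Similarly" adjustment quoted above, assuming the unlink loops should cover exactly the queue indices that were linked at init time.

```c
/* Sketch of the bnxt_del_napi() unwind with the suggested adjustment:
 * clear the RX mappings as posted, but only the stack-visible TX queues,
 * since the XDP TX rings were never linked to a netdev queue.
 */
for (i = 0; i < bp->rx_nr_rings; i++)
	netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
	netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
```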
On Thu, 30 Nov 2023 12:53:50 -0800 Nambiar, Amritha wrote:
> > This will include the XDP TX rings that are internal to the driver. I
> > think we need to exclude these XDP rings and do something like this:
> >
> > if (i > bp->tx_nr_rings_xdp)
> >          netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
> > NETDEV_QUEUE_TYPE_TX, &txr->bnapi->napi);
>
> Okay, will wait for Jakub's response as well. I can make this change in
> the next version (after waiting for other comments on the rest of the
> series), but I may not be able to test this on bnxt.

No extra comments from me, thanks for taking care of the update.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e35e7e02538c..08793e24e0ee 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3845,6 +3845,9 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
 	ring = &rxr->rx_ring_struct;
 	bnxt_init_rxbd_pages(ring, type);
 
+	netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+			     &rxr->bnapi->napi);
+
 	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
 		bpf_prog_add(bp->xdp_prog, 1);
 		rxr->xdp_prog = bp->xdp_prog;
@@ -3921,6 +3924,9 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
 
 		ring->fw_ring_id = INVALID_HW_RING_ID;
+
+		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX,
+				     &txr->bnapi->napi);
 	}
 
 	return 0;
@@ -9754,6 +9760,7 @@ static int bnxt_request_irq(struct bnxt *bp)
 		if (rc)
 			break;
 
+		netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
 		irq->requested = 1;
 
 		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
@@ -9781,6 +9788,11 @@ static void bnxt_del_napi(struct bnxt *bp)
 	if (!bp->bnapi)
 		return;
 
+	for (i = 0; i < bp->rx_nr_rings; i++)
+		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+	for (i = 0; i < bp->tx_nr_rings; i++)
+		netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		struct bnxt_napi *bnapi = bp->bnapi[i];
 
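Once a driver populates these queue/NAPI mappings, they become visible through the netdev genetlink family introduced earlier in this series. As a rough illustration only, a dump might be issued with the in-tree YNL CLI; the op name, spec path, and attribute set here are assumptions based on the series' intent and may not match this revision exactly:

```
$ python3 tools/net/ynl/cli.py \
      --spec Documentation/netlink/specs/netdev.yaml \
      --dump queue-get --json '{"ifindex": 2}'
```

Each returned queue object would be expected to carry the queue id, its type (rx/tx), and the napi-id that netif_queue_set_napi() associated with it above.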