@@ -14934,13 +14934,89 @@ static void bnxt_init_rx_ring_rxbd_pages(struct bnxt *bp, struct bnxt_rx_ring_in
static void *bnxt_queue_mem_alloc(struct net_device *dev, int idx)
{
+ struct bnxt_rx_ring_info *rxr, *clone;
struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rxr = &bp->rx_ring[idx];
+ clone = kmemdup(rxr, sizeof(*rxr), GFP_KERNEL);
+ if (!clone)
+ return ERR_PTR(-ENOMEM);
+
+ clone->rx_prod = 0;
+ clone->rx_agg_prod = 0;
+ clone->rx_sw_agg_prod = 0;
+ clone->rx_next_cons = 0;
+
+ __bnxt_init_rx_ring_struct(bp, clone);
+
+ rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
+ if (rc)
+ goto err_free_clone;
+
+ rc = bnxt_alloc_rx_ring_struct(bp, &clone->rx_ring_struct);
+ if (rc)
+ goto err_free_page_pool;
- return &bp->rx_ring[idx];
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ rc = bnxt_alloc_rx_ring_struct(bp, &clone->rx_agg_ring_struct);
+ if (rc)
+ goto err_free_rx_ring;
+
+ rc = bnxt_alloc_rx_agg_bmap(bp, clone);
+ if (rc)
+ goto err_free_rx_agg_ring;
+ }
+
+ if (bp->flags & BNXT_FLAG_TPA) {
+ rc = __bnxt_alloc_one_tpa_info(bp, clone);
+ if (rc)
+ goto err_free_rx_agg_bmap;
+ }
+
+ bnxt_init_rx_ring_rxbd_pages(bp, clone);
+ bnxt_alloc_one_rx_ring(bp, clone);
+
+ rxr->rplc = clone;
+
+ return clone;
+
+err_free_rx_agg_bmap:
+ kfree(clone->rx_agg_bmap);
+err_free_rx_agg_ring:
+ bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
+err_free_rx_ring:
+ bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
+err_free_page_pool:
+ page_pool_destroy(clone->page_pool);
+ clone->page_pool = NULL;
+err_free_clone:
+ kfree(clone);
+
+ return ERR_PTR(rc);
}
static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
{
+ struct bnxt_rx_ring_info *rxr = qmem;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ring_struct *ring;
+
+ bnxt_free_tpa_info(bp, rxr);
+
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ kfree(rxr);
}
static int bnxt_queue_start(struct net_device *dev, int idx, void *qmem)
@@ -1076,6 +1076,8 @@ struct bnxt_rx_ring_info {
struct bnxt_ring_struct rx_agg_ring_struct;
struct xdp_rxq_info xdp_rxq;
struct page_pool *page_pool;
+
+ struct bnxt_rx_ring_info *rplc; /* replacement (clone) to swap in after quiesce */
};
struct bnxt_rx_sw_stats {
Implement allocating rx ring memory in bnxt_queue_mem_alloc(). This is done
by duplicating the existing rx ring entirely, then allocating new memory
into this clone, since most helper functions take an rx ring as an argument.
I've identified the following memory that gets allocated:

* rx_desc_ring
  * separate allocation per hw page
* rx_buf_ring
* rx_agg_desc_ring
* rx_agg_ring
* rx_agg_bmap
* rx_tpa
* rx_tpa_idx_map

So, zero the ring heads, alloc the rings, then call bnxt_alloc_one_rx_ring()
to fill in the descriptors.

It's interesting that struct bnxt_ring_mem_info points to the addresses of
arrays embedded within struct bnxt_rx_ring_info, instead of the heap
allocated queue memory. __bnxt_init_rx_ring_struct() is first called to
reconfigure these pointers for the clone.

The hardware is only aware of the pg_tbl, which we do not touch. In the
coming patches, after an rx ring has been quiesced, we'll swap the
dynamically allocated parts and then update the DMA mappings in pg_tbl.
Then the hardware is none the wiser!

Signed-off-by: David Wei <dw@davidwei.uk>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 78 ++++++++++++++++++++++-
 drivers/net/ethernet/broadcom/bnxt/bnxt.h |  2 +
 2 files changed, 79 insertions(+), 1 deletion(-)
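[Editor's note] To make the planned follow-up concrete, below is a minimal
sketch of what the post-quiesce swap described above could look like. It
assumes the pg_arr/dma_arr/pg_tbl members of struct bnxt_ring_mem_info as
found in the current driver; the helper name bnxt_swap_rx_ring_mem() and
the exact set of fields copied are illustrative only, not part of this
patch or its follow-ups.

/*
 * Illustrative sketch only: once the queue is quiesced, adopt the clone's
 * dynamically allocated memory and rewrite the DMA mappings in the
 * existing pg_tbl, which is the only piece the hardware knows about.
 */
static void bnxt_swap_rx_ring_mem(struct bnxt *bp,
				  struct bnxt_rx_ring_info *rxr,
				  struct bnxt_rx_ring_info *clone)
{
	struct bnxt_ring_mem_info *rmem = &rxr->rx_ring_struct.ring_mem;
	struct bnxt_ring_mem_info *crmem = &clone->rx_ring_struct.ring_mem;
	int i;

	/* Adopt the clone's descriptor pages... */
	for (i = 0; i < rmem->nr_pages; i++) {
		rmem->pg_arr[i] = crmem->pg_arr[i];
		rmem->dma_arr[i] = crmem->dma_arr[i];
		/* ...and point the hardware page table at them. */
		if (rmem->pg_tbl)
			rmem->pg_tbl[i] = cpu_to_le64(crmem->dma_arr[i]);
	}

	/* Software-only state simply moves over to the live ring. */
	rxr->rx_buf_ring = clone->rx_buf_ring;
	rxr->rx_agg_ring = clone->rx_agg_ring;
	rxr->rx_agg_bmap = clone->rx_agg_bmap;
	rxr->page_pool = clone->page_pool;
}

The aggregation descriptor ring's ring_mem would need the same per-page
treatment, and the producer indices would be reloaded from the clone before
traffic is restarted.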