| Message ID | 20240227014014.44855-2-niklas.soderlund+renesas@ragnatech.se |
| --- | --- |
| State | Superseded |
| Delegated to | Geert Uytterhoeven |
| Series | ravb: Align Rx descriptor setup and maintenance |
The Rx ring can be made up of either normal or extended descriptors, not
a mix of the two at the same time. Make this explicit by grouping the
two variables in an rx_ring union.

Extending the storage for normal descriptors from a single queue to
NUM_RX_QUEUE queues has no practical effect, but it aids readability:
the code that uses it already piggybacks on other members of struct
ravb_private that are arrays of length NUM_RX_QUEUE, e.g. rx_desc_dma.
It will also make further refactoring easier.

While at it, rename the normal descriptor Rx ring to make it clear it is
not strictly tied to the GbEthernet E-MAC IP found in RZ/G2L; normal
descriptors can be used on R-Car SoCs too.

Signed-off-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
---
 drivers/net/ethernet/renesas/ravb.h      |  6 ++-
 drivers/net/ethernet/renesas/ravb_main.c | 57 ++++++++++++------------
 2 files changed, 33 insertions(+), 30 deletions(-)
```diff
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 35e642fc4b2a..aecc98282c7e 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1092,8 +1092,10 @@ struct ravb_private {
 	struct ravb_desc *desc_bat;
 	dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
 	dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
-	struct ravb_rx_desc *gbeth_rx_ring;
-	struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+	union {
+		struct ravb_rx_desc *desc;
+		struct ravb_ex_rx_desc *ex_desc;
+	} rx_ring[NUM_RX_QUEUE];
 	struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
 	void *tx_align[NUM_TX_QUEUE];
 	struct sk_buff *rx_1st_skb;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index f9fb772b05c7..c25a80f4d3b9 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -241,11 +241,11 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
 	unsigned int ring_size;
 	unsigned int i;
 
-	if (!priv->gbeth_rx_ring)
+	if (!priv->rx_ring[q].desc)
 		return;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+		struct ravb_rx_desc *desc = &priv->rx_ring[q].desc[i];
 
 		if (!dma_mapping_error(ndev->dev.parent,
 				       le32_to_cpu(desc->dptr)))
@@ -255,9 +255,9 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
 					 DMA_FROM_DEVICE);
 	}
 	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
-	dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].desc,
 			  priv->rx_desc_dma[q]);
-	priv->gbeth_rx_ring = NULL;
+	priv->rx_ring[q].desc = NULL;
 }
 
 static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
@@ -266,11 +266,11 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
 	unsigned int ring_size;
 	unsigned int i;
 
-	if (!priv->rx_ring[q])
+	if (!priv->rx_ring[q].ex_desc)
 		return;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+		struct ravb_ex_rx_desc *desc = &priv->rx_ring[q].ex_desc[i];
 
 		if (!dma_mapping_error(ndev->dev.parent,
 				       le32_to_cpu(desc->dptr)))
@@ -281,9 +281,9 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
 	}
 	ring_size = sizeof(struct ravb_ex_rx_desc) *
 		    (priv->num_rx_ring[q] + 1);
-	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+	dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].ex_desc,
 			  priv->rx_desc_dma[q]);
-	priv->rx_ring[q] = NULL;
+	priv->rx_ring[q].ex_desc = NULL;
 }
 
 /* Free skb's and DMA buffers for Ethernet AVB */
@@ -335,11 +335,11 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
 	unsigned int i;
 
 	rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-	memset(priv->gbeth_rx_ring, 0, rx_ring_size);
+	memset(priv->rx_ring[q].desc, 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
-		rx_desc = &priv->gbeth_rx_ring[i];
+		rx_desc = &priv->rx_ring[q].desc[i];
 		rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 					  GBETH_RX_BUFF_MAX,
@@ -352,7 +352,7 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
-	rx_desc = &priv->gbeth_rx_ring[i];
+	rx_desc = &priv->rx_ring[q].desc[i];
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
 }
@@ -365,11 +365,11 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
 	dma_addr_t dma_addr;
 	unsigned int i;
 
-	memset(priv->rx_ring[q], 0, rx_ring_size);
+	memset(priv->rx_ring[q].ex_desc, 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
-		rx_desc = &priv->rx_ring[q][i];
+		rx_desc = &priv->rx_ring[q].ex_desc[i];
 		rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 					  RX_BUF_SZ,
@@ -382,7 +382,7 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
-	rx_desc = &priv->rx_ring[q][i];
+	rx_desc = &priv->rx_ring[q].ex_desc[i];
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
 }
@@ -437,10 +437,10 @@ static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
 
 	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
 
-	priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
-						 &priv->rx_desc_dma[q],
-						 GFP_KERNEL);
-	return priv->gbeth_rx_ring;
+	priv->rx_ring[q].desc = dma_alloc_coherent(ndev->dev.parent, ring_size,
+						   &priv->rx_desc_dma[q],
+						   GFP_KERNEL);
+	return priv->rx_ring[q].desc;
 }
 
 static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
@@ -450,10 +450,11 @@ static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
 
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 
-	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
-					      &priv->rx_desc_dma[q],
-					      GFP_KERNEL);
-	return priv->rx_ring[q];
+	priv->rx_ring[q].ex_desc = dma_alloc_coherent(ndev->dev.parent,
+						      ring_size,
+						      &priv->rx_desc_dma[q],
+						      GFP_KERNEL);
+	return priv->rx_ring[q].ex_desc;
 }
 
 /* Init skb and descriptor buffer for Ethernet AVB */
@@ -830,7 +831,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
 	stats = &priv->stats[q];
 
-	desc = &priv->gbeth_rx_ring[entry];
+	desc = &priv->rx_ring[q].desc[entry];
 	for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
@@ -901,13 +902,13 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 		}
 
 		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->gbeth_rx_ring[entry];
+		desc = &priv->rx_ring[q].desc[entry];
 	}
 
 	/* Refill the RX ring buffers. */
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
-		desc = &priv->gbeth_rx_ring[entry];
+		desc = &priv->rx_ring[q].desc[entry];
 		desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
 
 		if (!priv->rx_skb[q][entry]) {
@@ -957,7 +958,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 
 	boguscnt = min(boguscnt, *quota);
 	limit = boguscnt;
-	desc = &priv->rx_ring[q][entry];
+	desc = &priv->rx_ring[q].ex_desc[entry];
 	while (desc->die_dt != DT_FEMPTY) {
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
@@ -1017,13 +1018,13 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 		}
 
 		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q][entry];
+		desc = &priv->rx_ring[q].ex_desc[entry];
 	}
 
 	/* Refill the RX ring buffers. */
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q][entry];
+		desc = &priv->rx_ring[q].ex_desc[entry];
 		desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
 
 		if (!priv->rx_skb[q][entry]) {
```
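The heart of the change is the ravb.h hunk: both ring layouts now live behind a single rx_ring[NUM_RX_QUEUE] array. Below is a minimal, compilable C sketch of the same union-of-pointers pattern, not driver code: the descriptor structs are simplified stand-ins for struct ravb_rx_desc and struct ravb_ex_rx_desc, and the `extended` flag is a hypothetical stand-in for the driver's per-SoC selection of the gbeth or rcar code paths.

```c
/*
 * Illustrative sketch only. Each Rx queue uses exactly one descriptor
 * layout for the lifetime of the device, so a union of the two ring
 * pointers is safe and keeps one array indexed by queue.
 */
#include <stdio.h>

#define NUM_RX_QUEUE 2

/* Simplified stand-ins for the driver's two descriptor layouts. */
struct rx_desc    { unsigned int dptr; };
struct ex_rx_desc { unsigned int dptr; unsigned int timestamp; };

struct priv {
	union {
		struct rx_desc *desc;       /* normal descriptors */
		struct ex_rx_desc *ex_desc; /* extended descriptors */
	} rx_ring[NUM_RX_QUEUE];
	int extended; /* stand-in for the driver's per-SoC selection */
};

/* The device-wide layout choice, not the queue index, picks the member. */
static unsigned int entry_dptr(const struct priv *p, int q, int i)
{
	return p->extended ? p->rx_ring[q].ex_desc[i].dptr
			   : p->rx_ring[q].desc[i].dptr;
}

int main(void)
{
	struct ex_rx_desc ring[4] = { { .dptr = 0x1000, .timestamp = 0 } };
	struct priv p = { .extended = 1 };

	p.rx_ring[0].ex_desc = ring;
	printf("queue 0, entry 0: dptr=0x%x\n", entry_dptr(&p, 0, 0));
	return 0;
}
```

Because only the live member is ever dereferenced, the union costs nothing over two separate pointers while making the either/or relationship explicit in the type.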
On 27/02/2024 01:40, Niklas Söderlund wrote:
> The Rx ring can be made up of either normal or extended descriptors,
> not a mix of the two at the same time. Make this explicit by grouping
> the two variables in an rx_ring union.
[...]

Reviewed-by: Paul Barker <paul.barker.ct@bp.renesas.com>
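One sizing detail from the allocation hunks is worth spelling out: ring_size is always sizeof(descriptor) * (num_rx_ring[q] + 1). The extra entry is the trailing DT_LINKFIX descriptor, whose dptr the format functions point back at rx_desc_dma[q], the ring's base DMA address, closing the ring. A small sketch using a simplified descriptor layout (field names follow the diff; the exact field widths are illustrative):

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the driver's normal Rx descriptor. */
struct rx_desc {
	unsigned short ds_cc;  /* set to the buffer size when queued */
	unsigned char  die_dt; /* descriptor type: DT_FEMPTY, DT_LINKFIX, ... */
	unsigned int   dptr;   /* DMA address of the data buffer */
};

/*
 * num data descriptors plus one trailing link descriptor: the format
 * functions mark the last entry DT_LINKFIX with dptr pointing back at
 * the ring's base DMA address, making the ring circular.
 */
static size_t rx_ring_bytes(unsigned int num)
{
	return sizeof(struct rx_desc) * (num + 1);
}

int main(void)
{
	printf("a 1024-entry ring needs %zu bytes\n", rx_ring_bytes(1024));
	return 0;
}
```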