Message ID | 20231127115334.3670790-9-yoshihiro.shimoda.uh@renesas.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | Netdev Maintainers |
Series | net: rswitch: Add jumbo frames support |
Context | Check | Description |
---|---|---|
netdev/tree_selection | success | Clearly marked for net-next |
netdev/apply | fail | Patch does not apply to net-next |
Hi Yoshihiro,
kernel test robot noticed the following build warnings:
[auto build test WARNING on net/main]
[cannot apply to net-next/main linus/master horms-ipvs/master v6.7-rc3]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Yoshihiro-Shimoda/net-rswitch-Drop-unused-argument-return-value/20231127-195705
base: net/main
patch link: https://lore.kernel.org/r/20231127115334.3670790-9-yoshihiro.shimoda.uh%40renesas.com
patch subject: [PATCH net-next 8/9] net: rswitch: Add jumbo frames handling for TX
config: arm64-allyesconfig (https://download.01.org/0day-ci/archive/20231128/202311280447.HzrM7Jdd-lkp@intel.com/config)
compiler: clang version 17.0.0 (https://github.com/llvm/llvm-project.git 4a5ac14ee968ff0ad5d2cc1ffa0299048db4c88a)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20231128/202311280447.HzrM7Jdd-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202311280447.HzrM7Jdd-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> drivers/net/ethernet/renesas/rswitch.c:1680:42: warning: variable 'dma_addr' is uninitialized when used here [-Wuninitialized]
1680 | if (dma_mapping_error(ndev->dev.parent, dma_addr))
| ^~~~~~~~
drivers/net/ethernet/renesas/rswitch.c:1663:21: note: initialize the variable 'dma_addr' to silence this warning
1663 | dma_addr_t dma_addr, dma_addr_orig;
| ^
| = 0
1 warning generated.
vim +/dma_addr +1680 drivers/net/ethernet/renesas/rswitch.c
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1658
8e0aa1ff44ca30b Nathan Chancellor 2022-11-03 1659 static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1660 {
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1661 struct rswitch_device *rdev = netdev_priv(ndev);
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1662 struct rswitch_gwca_queue *gq = rdev->tx_queue;
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1663 dma_addr_t dma_addr, dma_addr_orig;
109b25d13e00543 Yoshihiro Shimoda 2023-11-22 1664 netdev_tx_t ret = NETDEV_TX_OK;
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1665 struct rswitch_ext_desc *desc;
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1666 unsigned int i, nr_desc;
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1667 u8 die_dt;
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1668 u16 len;
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1669
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1670 nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1671 if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) {
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1672 netif_stop_subqueue(ndev, 0);
a60caf039e96d80 Yoshihiro Shimoda 2023-05-29 1673 return NETDEV_TX_BUSY;
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1674 }
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1675
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1676 if (skb_put_padto(skb, ETH_ZLEN))
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1677 return ret;
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1678
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1679 dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
782486af9b5b764 Yoshihiro Shimoda 2023-11-22 @1680 if (dma_mapping_error(ndev->dev.parent, dma_addr))
782486af9b5b764 Yoshihiro Shimoda 2023-11-22 1681 goto err_kfree;
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1682
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1683 gq->skbs[gq->cur] = skb;
e0e4f789171ba70 Yoshihiro Shimoda 2023-11-27 1684 gq->unmap_addrs[gq->cur] = dma_addr;
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1685
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1686 /* DT_FSTART should be set at last. So, this is reverse order. */
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1687 for (i = nr_desc; i-- > 0; ) {
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1688 desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1689 die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1690 dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1691 len = rswitch_ext_desc_get_len(die_dt, skb->len);
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1692 if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt))
782486af9b5b764 Yoshihiro Shimoda 2023-11-22 1693 goto err_unmap;
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1694 }
33f5d733b589031 Yoshihiro Shimoda 2023-02-09 1695
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1696 wmb(); /* gq->cur must be incremented after die_dt was set */
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1697
9ce54e0ed5479a1 Yoshihiro Shimoda 2023-11-27 1698 gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1699 rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1700
782486af9b5b764 Yoshihiro Shimoda 2023-11-22 1701 return ret;
782486af9b5b764 Yoshihiro Shimoda 2023-11-22 1702
782486af9b5b764 Yoshihiro Shimoda 2023-11-22 1703 err_unmap:
782486af9b5b764 Yoshihiro Shimoda 2023-11-22 1704 dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
782486af9b5b764 Yoshihiro Shimoda 2023-11-22 1705
782486af9b5b764 Yoshihiro Shimoda 2023-11-22 1706 err_kfree:
782486af9b5b764 Yoshihiro Shimoda 2023-11-22 1707 dev_kfree_skb_any(skb);
782486af9b5b764 Yoshihiro Shimoda 2023-11-22 1708
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1709 return ret;
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1710 }
3590918b5d07aa5 Yoshihiro Shimoda 2022-10-31 1711
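The warning points at a real mismatch introduced by the patch: dma_map_single() now stores its result in dma_addr_orig, but the dma_mapping_error() check on the next line (and the gq->unmap_addrs[] bookkeeping) still reads dma_addr, which is uninitialized at that point. A minimal sketch of the likely correction, keeping the rest of the function as posted:

	dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))	/* check the address that was actually mapped */
		goto err_kfree;

	gq->skbs[gq->cur] = skb;
	gq->unmap_addrs[gq->cur] = dma_addr_orig;	/* remember the start of the mapping for the later unmap */

The err_unmap path presumably wants dma_addr_orig as well, since dma_addr is rewritten to a per-descriptor offset inside the loop; that is an observation beyond what the robot flags, not part of the reported warning.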
On 11/27/23 2:53 PM, Yoshihiro Shimoda wrote:

> If the driver would like to transmit a jumbo frame like 2KiB or more,
> it should be split into multiple queues. In near the future, to support

   In the near future, you mean (again)?

> this, add handling specific descriptor types F{START,MID,END}. However,
> such jumbo frames will not happen yet because the maximum MTU size is
> still default for now.
>
> Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
[...]

MBR, Sergey
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 009e6bfdad27..c5e3ee8f82bc 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1631,15 +1631,44 @@ static bool rswitch_ext_desc_set(struct rswitch_device *rdev,
 	return true;
 }
 
+static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index)
+{
+	if (nr_desc == 1)
+		return DT_FSINGLE | DIE;
+	if (index == 0)
+		return DT_FSTART;
+	if (nr_desc - 1 == index)
+		return DT_FEND | DIE;
+	return DT_FMID;
+}
+
+static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len)
+{
+	switch (die_dt & DT_MASK) {
+	case DT_FSINGLE:
+	case DT_FEND:
+		return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE;
+	case DT_FSTART:
+	case DT_FMID:
+		return RSWITCH_DESC_BUF_SIZE;
+	default:
+		return 0;
+	}
+}
+
 static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct rswitch_device *rdev = netdev_priv(ndev);
 	struct rswitch_gwca_queue *gq = rdev->tx_queue;
+	dma_addr_t dma_addr, dma_addr_orig;
 	netdev_tx_t ret = NETDEV_TX_OK;
 	struct rswitch_ext_desc *desc;
-	dma_addr_t dma_addr;
+	unsigned int i, nr_desc;
+	u8 die_dt;
+	u16 len;
 
-	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
+	nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
+	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) {
 		netif_stop_subqueue(ndev, 0);
 		return NETDEV_TX_BUSY;
 	}
@@ -1647,19 +1676,26 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
 	if (skb_put_padto(skb, ETH_ZLEN))
 		return ret;
 
-	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
+	dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
 	if (dma_mapping_error(ndev->dev.parent, dma_addr))
 		goto err_kfree;
 
 	gq->skbs[gq->cur] = skb;
 	gq->unmap_addrs[gq->cur] = dma_addr;
-	desc = &gq->tx_ring[gq->cur];
-	if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, skb->len, DT_FSINGLE | DIE))
-		goto err_unmap;
+
+	/* DT_FSTART should be set at last. So, this is reverse order. */
+	for (i = nr_desc; i-- > 0; ) {
+		desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
+		die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
+		dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
+		len = rswitch_ext_desc_get_len(die_dt, skb->len);
+		if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt))
+			goto err_unmap;
+	}
 
 	wmb();	/* gq->cur must be incremented after die_dt was set */
 
-	gq->cur = rswitch_next_queue_index(gq, true, 1);
+	gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
 	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
 
 	return ret;
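As a rough illustration of what the new helpers produce, the standalone sketch below mirrors rswitch_ext_desc_get_die_dt() and rswitch_ext_desc_get_len() from the hunk above and prints the descriptor chain for one example frame. The buffer size of 2048 and the example length of 5000 bytes are assumptions made only for this illustration; the real driver uses RSWITCH_DESC_BUF_SIZE and the DT_*/DIE bit definitions from rswitch.h, which are not part of this hunk.

#include <stdio.h>

#define BUF_SIZE 2048u	/* stand-in for RSWITCH_DESC_BUF_SIZE (assumed value) */

/* Mirrors rswitch_ext_desc_get_die_dt(), but returns a name instead of the hardware bits. */
static const char *die_dt_name(unsigned int nr_desc, unsigned int index)
{
	if (nr_desc == 1)
		return "DT_FSINGLE | DIE";
	if (index == 0)
		return "DT_FSTART";
	if (nr_desc - 1 == index)
		return "DT_FEND | DIE";
	return "DT_FMID";
}

/* Mirrors rswitch_ext_desc_get_len(): the last descriptor carries the remainder, the others a full buffer. */
static unsigned int desc_len(unsigned int nr_desc, unsigned int index, unsigned int orig_len)
{
	if (nr_desc == 1 || index == nr_desc - 1)
		return (orig_len % BUF_SIZE) ? (orig_len % BUF_SIZE) : BUF_SIZE;
	return BUF_SIZE;
}

int main(void)
{
	unsigned int skb_len = 5000;				/* hypothetical jumbo frame length */
	unsigned int nr_desc = (skb_len - 1) / BUF_SIZE + 1;	/* same formula as the patch */
	unsigned int i;

	for (i = 0; i < nr_desc; i++)
		printf("desc %u: offset %u, len %u, %s\n",
		       i, i * BUF_SIZE, desc_len(nr_desc, i, skb_len), die_dt_name(nr_desc, i));
	return 0;
}

For the 5000-byte example this yields three descriptors: DT_FSTART and DT_FMID carrying 2048 bytes each and DT_FEND | DIE carrying the remaining 904 bytes, which is the chain that rswitch_start_xmit() writes in reverse order so that the DT_FSTART descriptor becomes valid last.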
If the driver would like to transmit a jumbo frame like 2KiB or more,
it should be split into multiple queues. In near the future, to support
this, add handling specific descriptor types F{START,MID,END}. However,
such jumbo frames will not happen yet because the maximum MTU size is
still default for now.

Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
---
 drivers/net/ethernet/renesas/rswitch.c | 50 ++++++++++++++++++++++----
 1 file changed, 43 insertions(+), 7 deletions(-)