Message ID | 20230524081842.3060-2-jasowang@redhat.com
---|---
State | Changes Requested
Delegated to: | Netdev Maintainers
Series | virtio-net: don't busy poll for cvq command
Context | Check | Description
---|---|---
netdev/series_format | success | Posting correctly formatted |
netdev/tree_selection | success | Clearly marked for net-next |
netdev/fixes_present | success | Fixes tag not required for -next series |
netdev/header_inline | success | No static functions without inline keyword in header files |
netdev/build_32bit | success | Errors and warnings before: 8 this patch: 8 |
netdev/cc_maintainers | success | CCed 9 of 9 maintainers |
netdev/build_clang | success | Errors and warnings before: 8 this patch: 8 |
netdev/verify_signedoff | success | Signed-off-by tag matches author and committer |
netdev/deprecated_api | success | None detected |
netdev/check_selftest | success | No net selftest shell script |
netdev/verify_fixes | success | No Fixes tag |
netdev/build_allmodconfig_warn | success | Errors and warnings before: 8 this patch: 8 |
netdev/checkpatch | success | total: 0 errors, 0 warnings, 0 checks, 138 lines checked |
netdev/kdoc | success | Errors and warnings before: 0 this patch: 0 |
netdev/source_inline | success | Was 0 now: 0 |
On Wed, May 24, 2023 at 04:18:41PM +0800, Jason Wang wrote: > This patch convert rx mode setting to be done in a workqueue, this is > a must for allow to sleep when waiting for the cvq command to > response since current code is executed under addr spin lock. > > Signed-off-by: Jason Wang <jasowang@redhat.com> > --- > Changes since V1: > - use RTNL to synchronize rx mode worker > --- > drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++--- > 1 file changed, 52 insertions(+), 3 deletions(-) > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c > index 56ca1d270304..5d2f1da4eaa0 100644 > --- a/drivers/net/virtio_net.c > +++ b/drivers/net/virtio_net.c > @@ -265,6 +265,12 @@ struct virtnet_info { > /* Work struct for config space updates */ > struct work_struct config_work; > > + /* Work struct for config rx mode */ With a bit less abbreviation maybe? setting rx mode? > + struct work_struct rx_mode_work; > + > + /* Is rx mode work enabled? */ Ugh not a great comment. > + bool rx_mode_work_enabled; > + > /* Does the affinity hint is set for virtqueues? */ > bool affinity_hint_set; > > @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi) > spin_unlock_bh(&vi->refill_lock); > } > > +static void enable_rx_mode_work(struct virtnet_info *vi) > +{ > + rtnl_lock(); > + vi->rx_mode_work_enabled = true; > + rtnl_unlock(); > +} > + > +static void disable_rx_mode_work(struct virtnet_info *vi) > +{ > + rtnl_lock(); > + vi->rx_mode_work_enabled = false; > + rtnl_unlock(); > +} > + > static void virtqueue_napi_schedule(struct napi_struct *napi, > struct virtqueue *vq) > { > @@ -2341,9 +2361,11 @@ static int virtnet_close(struct net_device *dev) > return 0; > } > > -static void virtnet_set_rx_mode(struct net_device *dev) > +static void virtnet_rx_mode_work(struct work_struct *work) > { > - struct virtnet_info *vi = netdev_priv(dev); > + struct virtnet_info *vi = > + container_of(work, struct virtnet_info, rx_mode_work); > + struct net_device *dev = vi->dev; > struct scatterlist sg[2]; > struct virtio_net_ctrl_mac *mac_data; > struct netdev_hw_addr *ha; > @@ -2356,6 +2378,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) > return; > > + rtnl_lock(); > + > vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); > vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); > > @@ -2373,14 +2397,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", > vi->ctrl->allmulti ? 
"en" : "dis"); > > + netif_addr_lock_bh(dev); > + > uc_count = netdev_uc_count(dev); > mc_count = netdev_mc_count(dev); > /* MAC filter - use one buffer for both lists */ > buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + > (2 * sizeof(mac_data->entries)), GFP_ATOMIC); > mac_data = buf; > - if (!buf) > + if (!buf) { > + netif_addr_unlock_bh(dev); > + rtnl_unlock(); > return; > + } > > sg_init_table(sg, 2); > > @@ -2401,6 +2430,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > netdev_for_each_mc_addr(ha, dev) > memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); > > + netif_addr_unlock_bh(dev); > + > sg_set_buf(&sg[1], mac_data, > sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); > > @@ -2408,9 +2439,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) > dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); > > + rtnl_unlock(); > + > kfree(buf); > } > > +static void virtnet_set_rx_mode(struct net_device *dev) > +{ > + struct virtnet_info *vi = netdev_priv(dev); > + > + if (vi->rx_mode_work_enabled) > + schedule_work(&vi->rx_mode_work); > +} > + > static int virtnet_vlan_rx_add_vid(struct net_device *dev, > __be16 proto, u16 vid) > { > @@ -3181,6 +3222,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev) > > /* Make sure no work handler is accessing the device */ > flush_work(&vi->config_work); > + disable_rx_mode_work(vi); > + flush_work(&vi->rx_mode_work); > > netif_tx_lock_bh(vi->dev); > netif_device_detach(vi->dev); Hmm so queued rx mode work will just get skipped and on restore we get a wrong rx mode. Any way to make this more robust? > @@ -3203,6 +3246,7 @@ static int virtnet_restore_up(struct virtio_device *vdev) > virtio_device_ready(vdev); > > enable_delayed_refill(vi); > + enable_rx_mode_work(vi); > > if (netif_running(vi->dev)) { > err = virtnet_open(vi->dev); > @@ -4002,6 +4046,7 @@ static int virtnet_probe(struct virtio_device *vdev) > vdev->priv = vi; > > INIT_WORK(&vi->config_work, virtnet_config_changed_work); > + INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); > spin_lock_init(&vi->refill_lock); > > if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) { > @@ -4110,6 +4155,8 @@ static int virtnet_probe(struct virtio_device *vdev) > if (vi->has_rss || vi->has_rss_hash_report) > virtnet_init_default_rss(vi); > > + enable_rx_mode_work(vi); > + > /* serialize netdev register + virtio_device_ready() with ndo_open() */ > rtnl_lock(); > > @@ -4207,6 +4254,8 @@ static void virtnet_remove(struct virtio_device *vdev) > > /* Make sure no work handler is accessing the device. */ > flush_work(&vi->config_work); > + disable_rx_mode_work(vi); > + flush_work(&vi->rx_mode_work); > > unregister_netdev(vi->dev); > > -- > 2.25.1
On Wed, May 24, 2023 at 5:15 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > On Wed, May 24, 2023 at 04:18:41PM +0800, Jason Wang wrote: > > This patch convert rx mode setting to be done in a workqueue, this is > > a must for allow to sleep when waiting for the cvq command to > > response since current code is executed under addr spin lock. > > > > Signed-off-by: Jason Wang <jasowang@redhat.com> > > --- > > Changes since V1: > > - use RTNL to synchronize rx mode worker > > --- > > drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++--- > > 1 file changed, 52 insertions(+), 3 deletions(-) > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c > > index 56ca1d270304..5d2f1da4eaa0 100644 > > --- a/drivers/net/virtio_net.c > > +++ b/drivers/net/virtio_net.c > > @@ -265,6 +265,12 @@ struct virtnet_info { > > /* Work struct for config space updates */ > > struct work_struct config_work; > > > > + /* Work struct for config rx mode */ > > With a bit less abbreviation maybe? setting rx mode? That's fine. > > > + struct work_struct rx_mode_work; > > + > > + /* Is rx mode work enabled? */ > > Ugh not a great comment. Any suggestions for this. E.g we had: /* Is delayed refill enabled? */ > > > + bool rx_mode_work_enabled; > > + > > > > > /* Does the affinity hint is set for virtqueues? */ > > bool affinity_hint_set; > > > > @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi) > > spin_unlock_bh(&vi->refill_lock); > > } > > > > +static void enable_rx_mode_work(struct virtnet_info *vi) > > +{ > > + rtnl_lock(); > > + vi->rx_mode_work_enabled = true; > > + rtnl_unlock(); > > +} > > + > > +static void disable_rx_mode_work(struct virtnet_info *vi) > > +{ > > + rtnl_lock(); > > + vi->rx_mode_work_enabled = false; > > + rtnl_unlock(); > > +} > > + > > static void virtqueue_napi_schedule(struct napi_struct *napi, > > struct virtqueue *vq) > > { > > @@ -2341,9 +2361,11 @@ static int virtnet_close(struct net_device *dev) > > return 0; > > } > > > > -static void virtnet_set_rx_mode(struct net_device *dev) > > +static void virtnet_rx_mode_work(struct work_struct *work) > > { > > - struct virtnet_info *vi = netdev_priv(dev); > > + struct virtnet_info *vi = > > + container_of(work, struct virtnet_info, rx_mode_work); > > + struct net_device *dev = vi->dev; > > struct scatterlist sg[2]; > > struct virtio_net_ctrl_mac *mac_data; > > struct netdev_hw_addr *ha; > > @@ -2356,6 +2378,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) > > return; > > > > + rtnl_lock(); > > + > > vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); > > vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); > > > > @@ -2373,14 +2397,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", > > vi->ctrl->allmulti ? 
"en" : "dis"); > > > > + netif_addr_lock_bh(dev); > > + > > uc_count = netdev_uc_count(dev); > > mc_count = netdev_mc_count(dev); > > /* MAC filter - use one buffer for both lists */ > > buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + > > (2 * sizeof(mac_data->entries)), GFP_ATOMIC); > > mac_data = buf; > > - if (!buf) > > + if (!buf) { > > + netif_addr_unlock_bh(dev); > > + rtnl_unlock(); > > return; > > + } > > > > sg_init_table(sg, 2); > > > > @@ -2401,6 +2430,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > netdev_for_each_mc_addr(ha, dev) > > memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); > > > > + netif_addr_unlock_bh(dev); > > + > > sg_set_buf(&sg[1], mac_data, > > sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); > > > > @@ -2408,9 +2439,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) > > dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); > > > > + rtnl_unlock(); > > + > > kfree(buf); > > } > > > > +static void virtnet_set_rx_mode(struct net_device *dev) > > +{ > > + struct virtnet_info *vi = netdev_priv(dev); > > + > > + if (vi->rx_mode_work_enabled) > > + schedule_work(&vi->rx_mode_work); > > +} > > + > > > static int virtnet_vlan_rx_add_vid(struct net_device *dev, > > __be16 proto, u16 vid) > > { > > @@ -3181,6 +3222,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev) > > > > /* Make sure no work handler is accessing the device */ > > flush_work(&vi->config_work); > > + disable_rx_mode_work(vi); > > + flush_work(&vi->rx_mode_work); > > > > netif_tx_lock_bh(vi->dev); > > netif_device_detach(vi->dev); > > Hmm so queued rx mode work will just get skipped > and on restore we get a wrong rx mode. > Any way to make this more robust? It could be done by scheduling a work on restore. Thanks > > > > @@ -3203,6 +3246,7 @@ static int virtnet_restore_up(struct virtio_device *vdev) > > virtio_device_ready(vdev); > > > > enable_delayed_refill(vi); > > + enable_rx_mode_work(vi); > > > > if (netif_running(vi->dev)) { > > err = virtnet_open(vi->dev); > > @@ -4002,6 +4046,7 @@ static int virtnet_probe(struct virtio_device *vdev) > > vdev->priv = vi; > > > > INIT_WORK(&vi->config_work, virtnet_config_changed_work); > > + INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); > > spin_lock_init(&vi->refill_lock); > > > > if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) { > > @@ -4110,6 +4155,8 @@ static int virtnet_probe(struct virtio_device *vdev) > > if (vi->has_rss || vi->has_rss_hash_report) > > virtnet_init_default_rss(vi); > > > > + enable_rx_mode_work(vi); > > + > > /* serialize netdev register + virtio_device_ready() with ndo_open() */ > > rtnl_lock(); > > > > @@ -4207,6 +4254,8 @@ static void virtnet_remove(struct virtio_device *vdev) > > > > /* Make sure no work handler is accessing the device. */ > > flush_work(&vi->config_work); > > + disable_rx_mode_work(vi); > > + flush_work(&vi->rx_mode_work); > > > > unregister_netdev(vi->dev); > > > > -- > > 2.25.1 >
On Thu, May 25, 2023 at 11:43:34AM +0800, Jason Wang wrote: > On Wed, May 24, 2023 at 5:15 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > On Wed, May 24, 2023 at 04:18:41PM +0800, Jason Wang wrote: > > > This patch convert rx mode setting to be done in a workqueue, this is > > > a must for allow to sleep when waiting for the cvq command to > > > response since current code is executed under addr spin lock. > > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com> > > > --- > > > Changes since V1: > > > - use RTNL to synchronize rx mode worker > > > --- > > > drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++--- > > > 1 file changed, 52 insertions(+), 3 deletions(-) > > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c > > > index 56ca1d270304..5d2f1da4eaa0 100644 > > > --- a/drivers/net/virtio_net.c > > > +++ b/drivers/net/virtio_net.c > > > @@ -265,6 +265,12 @@ struct virtnet_info { > > > /* Work struct for config space updates */ > > > struct work_struct config_work; > > > > > > + /* Work struct for config rx mode */ > > > > With a bit less abbreviation maybe? setting rx mode? > > That's fine. > > > > > > + struct work_struct rx_mode_work; > > > + > > > + /* Is rx mode work enabled? */ > > > > Ugh not a great comment. > > Any suggestions for this. E.g we had: > > /* Is delayed refill enabled? */ /* OK to queue work setting RX mode? */ > > > > > + bool rx_mode_work_enabled; > > > + > > > > > > > > > /* Does the affinity hint is set for virtqueues? */ > > > bool affinity_hint_set; > > > > > > @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi) > > > spin_unlock_bh(&vi->refill_lock); > > > } > > > > > > +static void enable_rx_mode_work(struct virtnet_info *vi) > > > +{ > > > + rtnl_lock(); > > > + vi->rx_mode_work_enabled = true; > > > + rtnl_unlock(); > > > +} > > > + > > > +static void disable_rx_mode_work(struct virtnet_info *vi) > > > +{ > > > + rtnl_lock(); > > > + vi->rx_mode_work_enabled = false; > > > + rtnl_unlock(); > > > +} > > > + > > > static void virtqueue_napi_schedule(struct napi_struct *napi, > > > struct virtqueue *vq) > > > { > > > @@ -2341,9 +2361,11 @@ static int virtnet_close(struct net_device *dev) > > > return 0; > > > } > > > > > > -static void virtnet_set_rx_mode(struct net_device *dev) > > > +static void virtnet_rx_mode_work(struct work_struct *work) > > > { > > > - struct virtnet_info *vi = netdev_priv(dev); > > > + struct virtnet_info *vi = > > > + container_of(work, struct virtnet_info, rx_mode_work); > > > + struct net_device *dev = vi->dev; > > > struct scatterlist sg[2]; > > > struct virtio_net_ctrl_mac *mac_data; > > > struct netdev_hw_addr *ha; > > > @@ -2356,6 +2378,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) > > > return; > > > > > > + rtnl_lock(); > > > + > > > vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); > > > vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); > > > > > > @@ -2373,14 +2397,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", > > > vi->ctrl->allmulti ? 
"en" : "dis"); > > > > > > + netif_addr_lock_bh(dev); > > > + > > > uc_count = netdev_uc_count(dev); > > > mc_count = netdev_mc_count(dev); > > > /* MAC filter - use one buffer for both lists */ > > > buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + > > > (2 * sizeof(mac_data->entries)), GFP_ATOMIC); > > > mac_data = buf; > > > - if (!buf) > > > + if (!buf) { > > > + netif_addr_unlock_bh(dev); > > > + rtnl_unlock(); > > > return; > > > + } > > > > > > sg_init_table(sg, 2); > > > > > > @@ -2401,6 +2430,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > netdev_for_each_mc_addr(ha, dev) > > > memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); > > > > > > + netif_addr_unlock_bh(dev); > > > + > > > sg_set_buf(&sg[1], mac_data, > > > sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); > > > > > > @@ -2408,9 +2439,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) > > > dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); > > > > > > + rtnl_unlock(); > > > + > > > kfree(buf); > > > } > > > > > > +static void virtnet_set_rx_mode(struct net_device *dev) > > > +{ > > > + struct virtnet_info *vi = netdev_priv(dev); > > > + > > > + if (vi->rx_mode_work_enabled) > > > + schedule_work(&vi->rx_mode_work); > > > +} > > > + > > > > > static int virtnet_vlan_rx_add_vid(struct net_device *dev, > > > __be16 proto, u16 vid) > > > { > > > @@ -3181,6 +3222,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev) > > > > > > /* Make sure no work handler is accessing the device */ > > > flush_work(&vi->config_work); > > > + disable_rx_mode_work(vi); > > > + flush_work(&vi->rx_mode_work); > > > > > > netif_tx_lock_bh(vi->dev); > > > netif_device_detach(vi->dev); > > > > Hmm so queued rx mode work will just get skipped > > and on restore we get a wrong rx mode. > > Any way to make this more robust? > > It could be done by scheduling a work on restore. > > Thanks > > > > > > > @@ -3203,6 +3246,7 @@ static int virtnet_restore_up(struct virtio_device *vdev) > > > virtio_device_ready(vdev); > > > > > > enable_delayed_refill(vi); > > > + enable_rx_mode_work(vi); > > > > > > if (netif_running(vi->dev)) { > > > err = virtnet_open(vi->dev); > > > @@ -4002,6 +4046,7 @@ static int virtnet_probe(struct virtio_device *vdev) > > > vdev->priv = vi; > > > > > > INIT_WORK(&vi->config_work, virtnet_config_changed_work); > > > + INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); > > > spin_lock_init(&vi->refill_lock); > > > > > > if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) { > > > @@ -4110,6 +4155,8 @@ static int virtnet_probe(struct virtio_device *vdev) > > > if (vi->has_rss || vi->has_rss_hash_report) > > > virtnet_init_default_rss(vi); > > > > > > + enable_rx_mode_work(vi); > > > + > > > /* serialize netdev register + virtio_device_ready() with ndo_open() */ > > > rtnl_lock(); > > > > > > @@ -4207,6 +4254,8 @@ static void virtnet_remove(struct virtio_device *vdev) > > > > > > /* Make sure no work handler is accessing the device. */ > > > flush_work(&vi->config_work); > > > + disable_rx_mode_work(vi); > > > + flush_work(&vi->rx_mode_work); > > > > > > unregister_netdev(vi->dev); > > > > > > -- > > > 2.25.1 > >
On Thu, May 25, 2023 at 3:41 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > On Thu, May 25, 2023 at 11:43:34AM +0800, Jason Wang wrote: > > On Wed, May 24, 2023 at 5:15 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > On Wed, May 24, 2023 at 04:18:41PM +0800, Jason Wang wrote: > > > > This patch convert rx mode setting to be done in a workqueue, this is > > > > a must for allow to sleep when waiting for the cvq command to > > > > response since current code is executed under addr spin lock. > > > > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com> > > > > --- > > > > Changes since V1: > > > > - use RTNL to synchronize rx mode worker > > > > --- > > > > drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++--- > > > > 1 file changed, 52 insertions(+), 3 deletions(-) > > > > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c > > > > index 56ca1d270304..5d2f1da4eaa0 100644 > > > > --- a/drivers/net/virtio_net.c > > > > +++ b/drivers/net/virtio_net.c > > > > @@ -265,6 +265,12 @@ struct virtnet_info { > > > > /* Work struct for config space updates */ > > > > struct work_struct config_work; > > > > > > > > + /* Work struct for config rx mode */ > > > > > > With a bit less abbreviation maybe? setting rx mode? > > > > That's fine. > > > > > > > > > + struct work_struct rx_mode_work; > > > > + > > > > + /* Is rx mode work enabled? */ > > > > > > Ugh not a great comment. > > > > Any suggestions for this. E.g we had: > > > > /* Is delayed refill enabled? */ > > /* OK to queue work setting RX mode? */ Ok. > > > > > > > > > + bool rx_mode_work_enabled; > > > > + > > > > > > > > > > > > > /* Does the affinity hint is set for virtqueues? */ > > > > bool affinity_hint_set; > > > > > > > > @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi) > > > > spin_unlock_bh(&vi->refill_lock); > > > > } > > > > > > > > +static void enable_rx_mode_work(struct virtnet_info *vi) > > > > +{ > > > > + rtnl_lock(); > > > > + vi->rx_mode_work_enabled = true; > > > > + rtnl_unlock(); > > > > +} > > > > + > > > > +static void disable_rx_mode_work(struct virtnet_info *vi) > > > > +{ > > > > + rtnl_lock(); > > > > + vi->rx_mode_work_enabled = false; > > > > + rtnl_unlock(); > > > > +} > > > > + > > > > static void virtqueue_napi_schedule(struct napi_struct *napi, > > > > struct virtqueue *vq) > > > > { > > > > @@ -2341,9 +2361,11 @@ static int virtnet_close(struct net_device *dev) > > > > return 0; > > > > } > > > > > > > > -static void virtnet_set_rx_mode(struct net_device *dev) > > > > +static void virtnet_rx_mode_work(struct work_struct *work) > > > > { > > > > - struct virtnet_info *vi = netdev_priv(dev); > > > > + struct virtnet_info *vi = > > > > + container_of(work, struct virtnet_info, rx_mode_work); > > > > + struct net_device *dev = vi->dev; > > > > struct scatterlist sg[2]; > > > > struct virtio_net_ctrl_mac *mac_data; > > > > struct netdev_hw_addr *ha; > > > > @@ -2356,6 +2378,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) > > > > return; > > > > > > > > + rtnl_lock(); > > > > + > > > > vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); > > > > vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); > > > > > > > > @@ -2373,14 +2397,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", > > > > vi->ctrl->allmulti ? 
"en" : "dis"); > > > > > > > > + netif_addr_lock_bh(dev); > > > > + > > > > uc_count = netdev_uc_count(dev); > > > > mc_count = netdev_mc_count(dev); > > > > /* MAC filter - use one buffer for both lists */ > > > > buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + > > > > (2 * sizeof(mac_data->entries)), GFP_ATOMIC); > > > > mac_data = buf; > > > > - if (!buf) > > > > + if (!buf) { > > > > + netif_addr_unlock_bh(dev); > > > > + rtnl_unlock(); > > > > return; > > > > + } > > > > > > > > sg_init_table(sg, 2); > > > > > > > > @@ -2401,6 +2430,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > netdev_for_each_mc_addr(ha, dev) > > > > memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); > > > > > > > > + netif_addr_unlock_bh(dev); > > > > + > > > > sg_set_buf(&sg[1], mac_data, > > > > sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); > > > > > > > > @@ -2408,9 +2439,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) > > > > dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); > > > > > > > > + rtnl_unlock(); > > > > + > > > > kfree(buf); > > > > } > > > > > > > > +static void virtnet_set_rx_mode(struct net_device *dev) > > > > +{ > > > > + struct virtnet_info *vi = netdev_priv(dev); > > > > + > > > > + if (vi->rx_mode_work_enabled) > > > > + schedule_work(&vi->rx_mode_work); > > > > +} > > > > + > > > > > > > static int virtnet_vlan_rx_add_vid(struct net_device *dev, > > > > __be16 proto, u16 vid) > > > > { > > > > @@ -3181,6 +3222,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev) > > > > > > > > /* Make sure no work handler is accessing the device */ > > > > flush_work(&vi->config_work); > > > > + disable_rx_mode_work(vi); > > > > + flush_work(&vi->rx_mode_work); > > > > > > > > netif_tx_lock_bh(vi->dev); > > > > netif_device_detach(vi->dev); > > > > > > Hmm so queued rx mode work will just get skipped > > > and on restore we get a wrong rx mode. > > > Any way to make this more robust? > > > > It could be done by scheduling a work on restore. Rethink this, I think we don't need to care about this case since the user processes should have been frozened. And that the reason we don't even need to hold RTNL here. Thanks > > > > Thanks > > > > > > > > > > > > @@ -3203,6 +3246,7 @@ static int virtnet_restore_up(struct virtio_device *vdev) > > > > virtio_device_ready(vdev); > > > > > > > > enable_delayed_refill(vi); > > > > + enable_rx_mode_work(vi); > > > > > > > > if (netif_running(vi->dev)) { > > > > err = virtnet_open(vi->dev); > > > > @@ -4002,6 +4046,7 @@ static int virtnet_probe(struct virtio_device *vdev) > > > > vdev->priv = vi; > > > > > > > > INIT_WORK(&vi->config_work, virtnet_config_changed_work); > > > > + INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); > > > > spin_lock_init(&vi->refill_lock); > > > > > > > > if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) { > > > > @@ -4110,6 +4155,8 @@ static int virtnet_probe(struct virtio_device *vdev) > > > > if (vi->has_rss || vi->has_rss_hash_report) > > > > virtnet_init_default_rss(vi); > > > > > > > > + enable_rx_mode_work(vi); > > > > + > > > > /* serialize netdev register + virtio_device_ready() with ndo_open() */ > > > > rtnl_lock(); > > > > > > > > @@ -4207,6 +4254,8 @@ static void virtnet_remove(struct virtio_device *vdev) > > > > > > > > /* Make sure no work handler is accessing the device. 
*/ > > > > flush_work(&vi->config_work); > > > > + disable_rx_mode_work(vi); > > > > + flush_work(&vi->rx_mode_work); > > > > > > > > unregister_netdev(vi->dev); > > > > > > > > -- > > > > 2.25.1 > > > >
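For illustration, the "schedule a work on restore" idea mentioned earlier in the thread would amount to something like the fragment below in virtnet_restore_up(). This is only a sketch, not part of the posted patch, and the message above argues it may be unnecessary because user processes are already frozen at that point; all names used here come from the patch itself.

	enable_delayed_refill(vi);
	enable_rx_mode_work(vi);

	/* Replay the last requested rx mode in case an update queued
	 * before freeze was skipped while rx_mode_work was disabled.
	 */
	schedule_work(&vi->rx_mode_work);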
On Fri, May 26, 2023 at 09:31:34AM +0800, Jason Wang wrote: > On Thu, May 25, 2023 at 3:41 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > On Thu, May 25, 2023 at 11:43:34AM +0800, Jason Wang wrote: > > > On Wed, May 24, 2023 at 5:15 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > > > On Wed, May 24, 2023 at 04:18:41PM +0800, Jason Wang wrote: > > > > > This patch convert rx mode setting to be done in a workqueue, this is > > > > > a must for allow to sleep when waiting for the cvq command to > > > > > response since current code is executed under addr spin lock. > > > > > > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com> > > > > > --- > > > > > Changes since V1: > > > > > - use RTNL to synchronize rx mode worker > > > > > --- > > > > > drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++--- > > > > > 1 file changed, 52 insertions(+), 3 deletions(-) > > > > > > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c > > > > > index 56ca1d270304..5d2f1da4eaa0 100644 > > > > > --- a/drivers/net/virtio_net.c > > > > > +++ b/drivers/net/virtio_net.c > > > > > @@ -265,6 +265,12 @@ struct virtnet_info { > > > > > /* Work struct for config space updates */ > > > > > struct work_struct config_work; > > > > > > > > > > + /* Work struct for config rx mode */ > > > > > > > > With a bit less abbreviation maybe? setting rx mode? > > > > > > That's fine. > > > > > > > > > > > > + struct work_struct rx_mode_work; > > > > > + > > > > > + /* Is rx mode work enabled? */ > > > > > > > > Ugh not a great comment. > > > > > > Any suggestions for this. E.g we had: > > > > > > /* Is delayed refill enabled? */ > > > > /* OK to queue work setting RX mode? */ > > Ok. > > > > > > > > > > > > > > + bool rx_mode_work_enabled; > > > > > + > > > > > > > > > > > > > > > > > /* Does the affinity hint is set for virtqueues? 
*/ > > > > > bool affinity_hint_set; > > > > > > > > > > @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi) > > > > > spin_unlock_bh(&vi->refill_lock); > > > > > } > > > > > > > > > > +static void enable_rx_mode_work(struct virtnet_info *vi) > > > > > +{ > > > > > + rtnl_lock(); > > > > > + vi->rx_mode_work_enabled = true; > > > > > + rtnl_unlock(); > > > > > +} > > > > > + > > > > > +static void disable_rx_mode_work(struct virtnet_info *vi) > > > > > +{ > > > > > + rtnl_lock(); > > > > > + vi->rx_mode_work_enabled = false; > > > > > + rtnl_unlock(); > > > > > +} > > > > > + > > > > > static void virtqueue_napi_schedule(struct napi_struct *napi, > > > > > struct virtqueue *vq) > > > > > { > > > > > @@ -2341,9 +2361,11 @@ static int virtnet_close(struct net_device *dev) > > > > > return 0; > > > > > } > > > > > > > > > > -static void virtnet_set_rx_mode(struct net_device *dev) > > > > > +static void virtnet_rx_mode_work(struct work_struct *work) > > > > > { > > > > > - struct virtnet_info *vi = netdev_priv(dev); > > > > > + struct virtnet_info *vi = > > > > > + container_of(work, struct virtnet_info, rx_mode_work); > > > > > + struct net_device *dev = vi->dev; > > > > > struct scatterlist sg[2]; > > > > > struct virtio_net_ctrl_mac *mac_data; > > > > > struct netdev_hw_addr *ha; > > > > > @@ -2356,6 +2378,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) > > > > > return; > > > > > > > > > > + rtnl_lock(); > > > > > + > > > > > vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); > > > > > vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); > > > > > > > > > > @@ -2373,14 +2397,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", > > > > > vi->ctrl->allmulti ? 
"en" : "dis"); > > > > > > > > > > + netif_addr_lock_bh(dev); > > > > > + > > > > > uc_count = netdev_uc_count(dev); > > > > > mc_count = netdev_mc_count(dev); > > > > > /* MAC filter - use one buffer for both lists */ > > > > > buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + > > > > > (2 * sizeof(mac_data->entries)), GFP_ATOMIC); > > > > > mac_data = buf; > > > > > - if (!buf) > > > > > + if (!buf) { > > > > > + netif_addr_unlock_bh(dev); > > > > > + rtnl_unlock(); > > > > > return; > > > > > + } > > > > > > > > > > sg_init_table(sg, 2); > > > > > > > > > > @@ -2401,6 +2430,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > netdev_for_each_mc_addr(ha, dev) > > > > > memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); > > > > > > > > > > + netif_addr_unlock_bh(dev); > > > > > + > > > > > sg_set_buf(&sg[1], mac_data, > > > > > sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); > > > > > > > > > > @@ -2408,9 +2439,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) > > > > > dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); > > > > > > > > > > + rtnl_unlock(); > > > > > + > > > > > kfree(buf); > > > > > } > > > > > > > > > > +static void virtnet_set_rx_mode(struct net_device *dev) > > > > > +{ > > > > > + struct virtnet_info *vi = netdev_priv(dev); > > > > > + > > > > > + if (vi->rx_mode_work_enabled) > > > > > + schedule_work(&vi->rx_mode_work); > > > > > +} > > > > > + > > > > > > > > > static int virtnet_vlan_rx_add_vid(struct net_device *dev, > > > > > __be16 proto, u16 vid) > > > > > { > > > > > @@ -3181,6 +3222,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev) > > > > > > > > > > /* Make sure no work handler is accessing the device */ > > > > > flush_work(&vi->config_work); > > > > > + disable_rx_mode_work(vi); > > > > > + flush_work(&vi->rx_mode_work); > > > > > > > > > > netif_tx_lock_bh(vi->dev); > > > > > netif_device_detach(vi->dev); > > > > > > > > Hmm so queued rx mode work will just get skipped > > > > and on restore we get a wrong rx mode. > > > > Any way to make this more robust? > > > > > > It could be done by scheduling a work on restore. > > Rethink this, I think we don't need to care about this case since the > user processes should have been frozened. Yes but not the workqueue. Want to switch to system_freezable_wq? > And that the reason we don't > even need to hold RTNL here. 
> > Thanks > > > > > > > Thanks > > > > > > > > > > > > > > > > > @@ -3203,6 +3246,7 @@ static int virtnet_restore_up(struct virtio_device *vdev) > > > > > virtio_device_ready(vdev); > > > > > > > > > > enable_delayed_refill(vi); > > > > > + enable_rx_mode_work(vi); > > > > > > > > > > if (netif_running(vi->dev)) { > > > > > err = virtnet_open(vi->dev); > > > > > @@ -4002,6 +4046,7 @@ static int virtnet_probe(struct virtio_device *vdev) > > > > > vdev->priv = vi; > > > > > > > > > > INIT_WORK(&vi->config_work, virtnet_config_changed_work); > > > > > + INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); > > > > > spin_lock_init(&vi->refill_lock); > > > > > > > > > > if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) { > > > > > @@ -4110,6 +4155,8 @@ static int virtnet_probe(struct virtio_device *vdev) > > > > > if (vi->has_rss || vi->has_rss_hash_report) > > > > > virtnet_init_default_rss(vi); > > > > > > > > > > + enable_rx_mode_work(vi); > > > > > + > > > > > /* serialize netdev register + virtio_device_ready() with ndo_open() */ > > > > > rtnl_lock(); > > > > > > > > > > @@ -4207,6 +4254,8 @@ static void virtnet_remove(struct virtio_device *vdev) > > > > > > > > > > /* Make sure no work handler is accessing the device. */ > > > > > flush_work(&vi->config_work); > > > > > + disable_rx_mode_work(vi); > > > > > + flush_work(&vi->rx_mode_work); > > > > > > > > > > unregister_netdev(vi->dev); > > > > > > > > > > -- > > > > > 2.25.1 > > > > > >
On Sun, May 28, 2023 at 7:39 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > On Fri, May 26, 2023 at 09:31:34AM +0800, Jason Wang wrote: > > On Thu, May 25, 2023 at 3:41 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > On Thu, May 25, 2023 at 11:43:34AM +0800, Jason Wang wrote: > > > > On Wed, May 24, 2023 at 5:15 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > > > > > On Wed, May 24, 2023 at 04:18:41PM +0800, Jason Wang wrote: > > > > > > This patch convert rx mode setting to be done in a workqueue, this is > > > > > > a must for allow to sleep when waiting for the cvq command to > > > > > > response since current code is executed under addr spin lock. > > > > > > > > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com> > > > > > > --- > > > > > > Changes since V1: > > > > > > - use RTNL to synchronize rx mode worker > > > > > > --- > > > > > > drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++--- > > > > > > 1 file changed, 52 insertions(+), 3 deletions(-) > > > > > > > > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c > > > > > > index 56ca1d270304..5d2f1da4eaa0 100644 > > > > > > --- a/drivers/net/virtio_net.c > > > > > > +++ b/drivers/net/virtio_net.c > > > > > > @@ -265,6 +265,12 @@ struct virtnet_info { > > > > > > /* Work struct for config space updates */ > > > > > > struct work_struct config_work; > > > > > > > > > > > > + /* Work struct for config rx mode */ > > > > > > > > > > With a bit less abbreviation maybe? setting rx mode? > > > > > > > > That's fine. > > > > > > > > > > > > > > > + struct work_struct rx_mode_work; > > > > > > + > > > > > > + /* Is rx mode work enabled? */ > > > > > > > > > > Ugh not a great comment. > > > > > > > > Any suggestions for this. E.g we had: > > > > > > > > /* Is delayed refill enabled? */ > > > > > > /* OK to queue work setting RX mode? */ > > > > Ok. > > > > > > > > > > > > > > > > > > > + bool rx_mode_work_enabled; > > > > > > + > > > > > > > > > > > > > > > > > > > > > /* Does the affinity hint is set for virtqueues? 
*/ > > > > > > bool affinity_hint_set; > > > > > > > > > > > > @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi) > > > > > > spin_unlock_bh(&vi->refill_lock); > > > > > > } > > > > > > > > > > > > +static void enable_rx_mode_work(struct virtnet_info *vi) > > > > > > +{ > > > > > > + rtnl_lock(); > > > > > > + vi->rx_mode_work_enabled = true; > > > > > > + rtnl_unlock(); > > > > > > +} > > > > > > + > > > > > > +static void disable_rx_mode_work(struct virtnet_info *vi) > > > > > > +{ > > > > > > + rtnl_lock(); > > > > > > + vi->rx_mode_work_enabled = false; > > > > > > + rtnl_unlock(); > > > > > > +} > > > > > > + > > > > > > static void virtqueue_napi_schedule(struct napi_struct *napi, > > > > > > struct virtqueue *vq) > > > > > > { > > > > > > @@ -2341,9 +2361,11 @@ static int virtnet_close(struct net_device *dev) > > > > > > return 0; > > > > > > } > > > > > > > > > > > > -static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > +static void virtnet_rx_mode_work(struct work_struct *work) > > > > > > { > > > > > > - struct virtnet_info *vi = netdev_priv(dev); > > > > > > + struct virtnet_info *vi = > > > > > > + container_of(work, struct virtnet_info, rx_mode_work); > > > > > > + struct net_device *dev = vi->dev; > > > > > > struct scatterlist sg[2]; > > > > > > struct virtio_net_ctrl_mac *mac_data; > > > > > > struct netdev_hw_addr *ha; > > > > > > @@ -2356,6 +2378,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) > > > > > > return; > > > > > > > > > > > > + rtnl_lock(); > > > > > > + > > > > > > vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); > > > > > > vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); > > > > > > > > > > > > @@ -2373,14 +2397,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", > > > > > > vi->ctrl->allmulti ? 
"en" : "dis"); > > > > > > > > > > > > + netif_addr_lock_bh(dev); > > > > > > + > > > > > > uc_count = netdev_uc_count(dev); > > > > > > mc_count = netdev_mc_count(dev); > > > > > > /* MAC filter - use one buffer for both lists */ > > > > > > buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + > > > > > > (2 * sizeof(mac_data->entries)), GFP_ATOMIC); > > > > > > mac_data = buf; > > > > > > - if (!buf) > > > > > > + if (!buf) { > > > > > > + netif_addr_unlock_bh(dev); > > > > > > + rtnl_unlock(); > > > > > > return; > > > > > > + } > > > > > > > > > > > > sg_init_table(sg, 2); > > > > > > > > > > > > @@ -2401,6 +2430,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > netdev_for_each_mc_addr(ha, dev) > > > > > > memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); > > > > > > > > > > > > + netif_addr_unlock_bh(dev); > > > > > > + > > > > > > sg_set_buf(&sg[1], mac_data, > > > > > > sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); > > > > > > > > > > > > @@ -2408,9 +2439,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) > > > > > > dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); > > > > > > > > > > > > + rtnl_unlock(); > > > > > > + > > > > > > kfree(buf); > > > > > > } > > > > > > > > > > > > +static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > +{ > > > > > > + struct virtnet_info *vi = netdev_priv(dev); > > > > > > + > > > > > > + if (vi->rx_mode_work_enabled) > > > > > > + schedule_work(&vi->rx_mode_work); > > > > > > +} > > > > > > + > > > > > > > > > > > static int virtnet_vlan_rx_add_vid(struct net_device *dev, > > > > > > __be16 proto, u16 vid) > > > > > > { > > > > > > @@ -3181,6 +3222,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev) > > > > > > > > > > > > /* Make sure no work handler is accessing the device */ > > > > > > flush_work(&vi->config_work); > > > > > > + disable_rx_mode_work(vi); > > > > > > + flush_work(&vi->rx_mode_work); > > > > > > > > > > > > netif_tx_lock_bh(vi->dev); > > > > > > netif_device_detach(vi->dev); > > > > > > > > > > Hmm so queued rx mode work will just get skipped > > > > > and on restore we get a wrong rx mode. > > > > > Any way to make this more robust? > > > > > > > > It could be done by scheduling a work on restore. > > > > Rethink this, I think we don't need to care about this case since the > > user processes should have been frozened. > > Yes but not the workqueue. Want to switch to system_freezable_wq? Yes, I will do it in v2. Thanks > > > And that the reason we don't > > even need to hold RTNL here. 
> > > > Thanks > > > > > > > > > > Thanks > > > > > > > > > > > > > > > > > > > > > > @@ -3203,6 +3246,7 @@ static int virtnet_restore_up(struct virtio_device *vdev) > > > > > > virtio_device_ready(vdev); > > > > > > > > > > > > enable_delayed_refill(vi); > > > > > > + enable_rx_mode_work(vi); > > > > > > > > > > > > if (netif_running(vi->dev)) { > > > > > > err = virtnet_open(vi->dev); > > > > > > @@ -4002,6 +4046,7 @@ static int virtnet_probe(struct virtio_device *vdev) > > > > > > vdev->priv = vi; > > > > > > > > > > > > INIT_WORK(&vi->config_work, virtnet_config_changed_work); > > > > > > + INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); > > > > > > spin_lock_init(&vi->refill_lock); > > > > > > > > > > > > if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) { > > > > > > @@ -4110,6 +4155,8 @@ static int virtnet_probe(struct virtio_device *vdev) > > > > > > if (vi->has_rss || vi->has_rss_hash_report) > > > > > > virtnet_init_default_rss(vi); > > > > > > > > > > > > + enable_rx_mode_work(vi); > > > > > > + > > > > > > /* serialize netdev register + virtio_device_ready() with ndo_open() */ > > > > > > rtnl_lock(); > > > > > > > > > > > > @@ -4207,6 +4254,8 @@ static void virtnet_remove(struct virtio_device *vdev) > > > > > > > > > > > > /* Make sure no work handler is accessing the device. */ > > > > > > flush_work(&vi->config_work); > > > > > > + disable_rx_mode_work(vi); > > > > > > + flush_work(&vi->rx_mode_work); > > > > > > > > > > > > unregister_netdev(vi->dev); > > > > > > > > > > > > -- > > > > > > 2.25.1 > > > > > > > > >
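For reference, the switch to system_freezable_wq being agreed on here would roughly change virtnet_set_rx_mode() as below. This is a sketch of the suggestion, not the actual v2, and the next message in the thread explains why a freezable workqueue alone turns out to be insufficient.

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	/* Work queued on system_freezable_wq is held back while the
	 * system is frozen instead of running against a suspended
	 * device.
	 */
	if (vi->rx_mode_work_enabled)
		queue_work(system_freezable_wq, &vi->rx_mode_work);
}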
On Mon, May 29, 2023 at 9:21 AM Jason Wang <jasowang@redhat.com> wrote: > > On Sun, May 28, 2023 at 7:39 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > On Fri, May 26, 2023 at 09:31:34AM +0800, Jason Wang wrote: > > > On Thu, May 25, 2023 at 3:41 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > > > On Thu, May 25, 2023 at 11:43:34AM +0800, Jason Wang wrote: > > > > > On Wed, May 24, 2023 at 5:15 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > > > > > > > On Wed, May 24, 2023 at 04:18:41PM +0800, Jason Wang wrote: > > > > > > > This patch convert rx mode setting to be done in a workqueue, this is > > > > > > > a must for allow to sleep when waiting for the cvq command to > > > > > > > response since current code is executed under addr spin lock. > > > > > > > > > > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com> > > > > > > > --- > > > > > > > Changes since V1: > > > > > > > - use RTNL to synchronize rx mode worker > > > > > > > --- > > > > > > > drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++--- > > > > > > > 1 file changed, 52 insertions(+), 3 deletions(-) > > > > > > > > > > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c > > > > > > > index 56ca1d270304..5d2f1da4eaa0 100644 > > > > > > > --- a/drivers/net/virtio_net.c > > > > > > > +++ b/drivers/net/virtio_net.c > > > > > > > @@ -265,6 +265,12 @@ struct virtnet_info { > > > > > > > /* Work struct for config space updates */ > > > > > > > struct work_struct config_work; > > > > > > > > > > > > > > + /* Work struct for config rx mode */ > > > > > > > > > > > > With a bit less abbreviation maybe? setting rx mode? > > > > > > > > > > That's fine. > > > > > > > > > > > > > > > > > > + struct work_struct rx_mode_work; > > > > > > > + > > > > > > > + /* Is rx mode work enabled? */ > > > > > > > > > > > > Ugh not a great comment. > > > > > > > > > > Any suggestions for this. E.g we had: > > > > > > > > > > /* Is delayed refill enabled? */ > > > > > > > > /* OK to queue work setting RX mode? */ > > > > > > Ok. > > > > > > > > > > > > > > > > > > > > > > > > + bool rx_mode_work_enabled; > > > > > > > + > > > > > > > > > > > > > > > > > > > > > > > > > /* Does the affinity hint is set for virtqueues? 
*/ > > > > > > > bool affinity_hint_set; > > > > > > > > > > > > > > @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi) > > > > > > > spin_unlock_bh(&vi->refill_lock); > > > > > > > } > > > > > > > > > > > > > > +static void enable_rx_mode_work(struct virtnet_info *vi) > > > > > > > +{ > > > > > > > + rtnl_lock(); > > > > > > > + vi->rx_mode_work_enabled = true; > > > > > > > + rtnl_unlock(); > > > > > > > +} > > > > > > > + > > > > > > > +static void disable_rx_mode_work(struct virtnet_info *vi) > > > > > > > +{ > > > > > > > + rtnl_lock(); > > > > > > > + vi->rx_mode_work_enabled = false; > > > > > > > + rtnl_unlock(); > > > > > > > +} > > > > > > > + > > > > > > > static void virtqueue_napi_schedule(struct napi_struct *napi, > > > > > > > struct virtqueue *vq) > > > > > > > { > > > > > > > @@ -2341,9 +2361,11 @@ static int virtnet_close(struct net_device *dev) > > > > > > > return 0; > > > > > > > } > > > > > > > > > > > > > > -static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > > +static void virtnet_rx_mode_work(struct work_struct *work) > > > > > > > { > > > > > > > - struct virtnet_info *vi = netdev_priv(dev); > > > > > > > + struct virtnet_info *vi = > > > > > > > + container_of(work, struct virtnet_info, rx_mode_work); > > > > > > > + struct net_device *dev = vi->dev; > > > > > > > struct scatterlist sg[2]; > > > > > > > struct virtio_net_ctrl_mac *mac_data; > > > > > > > struct netdev_hw_addr *ha; > > > > > > > @@ -2356,6 +2378,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > > if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) > > > > > > > return; > > > > > > > > > > > > > > + rtnl_lock(); > > > > > > > + > > > > > > > vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); > > > > > > > vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); > > > > > > > > > > > > > > @@ -2373,14 +2397,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > > dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", > > > > > > > vi->ctrl->allmulti ? 
"en" : "dis"); > > > > > > > > > > > > > > + netif_addr_lock_bh(dev); > > > > > > > + > > > > > > > uc_count = netdev_uc_count(dev); > > > > > > > mc_count = netdev_mc_count(dev); > > > > > > > /* MAC filter - use one buffer for both lists */ > > > > > > > buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + > > > > > > > (2 * sizeof(mac_data->entries)), GFP_ATOMIC); > > > > > > > mac_data = buf; > > > > > > > - if (!buf) > > > > > > > + if (!buf) { > > > > > > > + netif_addr_unlock_bh(dev); > > > > > > > + rtnl_unlock(); > > > > > > > return; > > > > > > > + } > > > > > > > > > > > > > > sg_init_table(sg, 2); > > > > > > > > > > > > > > @@ -2401,6 +2430,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > > netdev_for_each_mc_addr(ha, dev) > > > > > > > memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); > > > > > > > > > > > > > > + netif_addr_unlock_bh(dev); > > > > > > > + > > > > > > > sg_set_buf(&sg[1], mac_data, > > > > > > > sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); > > > > > > > > > > > > > > @@ -2408,9 +2439,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > > VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) > > > > > > > dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); > > > > > > > > > > > > > > + rtnl_unlock(); > > > > > > > + > > > > > > > kfree(buf); > > > > > > > } > > > > > > > > > > > > > > +static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > > +{ > > > > > > > + struct virtnet_info *vi = netdev_priv(dev); > > > > > > > + > > > > > > > + if (vi->rx_mode_work_enabled) > > > > > > > + schedule_work(&vi->rx_mode_work); > > > > > > > +} > > > > > > > + > > > > > > > > > > > > > static int virtnet_vlan_rx_add_vid(struct net_device *dev, > > > > > > > __be16 proto, u16 vid) > > > > > > > { > > > > > > > @@ -3181,6 +3222,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev) > > > > > > > > > > > > > > /* Make sure no work handler is accessing the device */ > > > > > > > flush_work(&vi->config_work); > > > > > > > + disable_rx_mode_work(vi); > > > > > > > + flush_work(&vi->rx_mode_work); > > > > > > > > > > > > > > netif_tx_lock_bh(vi->dev); > > > > > > > netif_device_detach(vi->dev); > > > > > > > > > > > > Hmm so queued rx mode work will just get skipped > > > > > > and on restore we get a wrong rx mode. > > > > > > Any way to make this more robust? > > > > > > > > > > It could be done by scheduling a work on restore. > > > > > > Rethink this, I think we don't need to care about this case since the > > > user processes should have been frozened. > > > > Yes but not the workqueue. Want to switch to system_freezable_wq? > > Yes, I will do it in v2. Actually, this doesn't work. Freezable workqueue can only guarantee when being freezed the new work will be queued and not scheduled until thaw. So the ktrhead that is executing the workqueue is not freezable. The busy loop (even with cond_resched()) will force suspend in this case. I wonder if we should switch to using a dedicated kthread for virtio-net then we can allow it to be frozen. Thanks > > Thanks > > > > > > And that the reason we don't > > > even need to hold RTNL here. 
> > > > > > Thanks > > > > > > > > > > > > > Thanks > > > > > > > > > > > > > > > > > > > > > > > > > > > @@ -3203,6 +3246,7 @@ static int virtnet_restore_up(struct virtio_device *vdev) > > > > > > > virtio_device_ready(vdev); > > > > > > > > > > > > > > enable_delayed_refill(vi); > > > > > > > + enable_rx_mode_work(vi); > > > > > > > > > > > > > > if (netif_running(vi->dev)) { > > > > > > > err = virtnet_open(vi->dev); > > > > > > > @@ -4002,6 +4046,7 @@ static int virtnet_probe(struct virtio_device *vdev) > > > > > > > vdev->priv = vi; > > > > > > > > > > > > > > INIT_WORK(&vi->config_work, virtnet_config_changed_work); > > > > > > > + INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work); > > > > > > > spin_lock_init(&vi->refill_lock); > > > > > > > > > > > > > > if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) { > > > > > > > @@ -4110,6 +4155,8 @@ static int virtnet_probe(struct virtio_device *vdev) > > > > > > > if (vi->has_rss || vi->has_rss_hash_report) > > > > > > > virtnet_init_default_rss(vi); > > > > > > > > > > > > > > + enable_rx_mode_work(vi); > > > > > > > + > > > > > > > /* serialize netdev register + virtio_device_ready() with ndo_open() */ > > > > > > > rtnl_lock(); > > > > > > > > > > > > > > @@ -4207,6 +4254,8 @@ static void virtnet_remove(struct virtio_device *vdev) > > > > > > > > > > > > > > /* Make sure no work handler is accessing the device. */ > > > > > > > flush_work(&vi->config_work); > > > > > > > + disable_rx_mode_work(vi); > > > > > > > + flush_work(&vi->rx_mode_work); > > > > > > > > > > > > > > unregister_netdev(vi->dev); > > > > > > > > > > > > > > -- > > > > > > > 2.25.1 > > > > > > > > > > > >
On Wed, Jun 28, 2023 at 9:34 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > On Wed, May 31, 2023 at 09:07:25AM +0800, Jason Wang wrote: > > On Mon, May 29, 2023 at 9:21 AM Jason Wang <jasowang@redhat.com> wrote: > > > > > > On Sun, May 28, 2023 at 7:39 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > > > On Fri, May 26, 2023 at 09:31:34AM +0800, Jason Wang wrote: > > > > > On Thu, May 25, 2023 at 3:41 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > > > > > > > On Thu, May 25, 2023 at 11:43:34AM +0800, Jason Wang wrote: > > > > > > > On Wed, May 24, 2023 at 5:15 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > > > > > > > > > > > On Wed, May 24, 2023 at 04:18:41PM +0800, Jason Wang wrote: > > > > > > > > > This patch convert rx mode setting to be done in a workqueue, this is > > > > > > > > > a must for allow to sleep when waiting for the cvq command to > > > > > > > > > response since current code is executed under addr spin lock. > > > > > > > > > > > > > > > > > > Signed-off-by: Jason Wang <jasowang@redhat.com> > > > > > > > > > --- > > > > > > > > > Changes since V1: > > > > > > > > > - use RTNL to synchronize rx mode worker > > > > > > > > > --- > > > > > > > > > drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++--- > > > > > > > > > 1 file changed, 52 insertions(+), 3 deletions(-) > > > > > > > > > > > > > > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c > > > > > > > > > index 56ca1d270304..5d2f1da4eaa0 100644 > > > > > > > > > --- a/drivers/net/virtio_net.c > > > > > > > > > +++ b/drivers/net/virtio_net.c > > > > > > > > > @@ -265,6 +265,12 @@ struct virtnet_info { > > > > > > > > > /* Work struct for config space updates */ > > > > > > > > > struct work_struct config_work; > > > > > > > > > > > > > > > > > > + /* Work struct for config rx mode */ > > > > > > > > > > > > > > > > With a bit less abbreviation maybe? setting rx mode? > > > > > > > > > > > > > > That's fine. > > > > > > > > > > > > > > > > > > > > > > > > + struct work_struct rx_mode_work; > > > > > > > > > + > > > > > > > > > + /* Is rx mode work enabled? */ > > > > > > > > > > > > > > > > Ugh not a great comment. > > > > > > > > > > > > > > Any suggestions for this. E.g we had: > > > > > > > > > > > > > > /* Is delayed refill enabled? */ > > > > > > > > > > > > /* OK to queue work setting RX mode? */ > > > > > > > > > > Ok. > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > + bool rx_mode_work_enabled; > > > > > > > > > + > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > /* Does the affinity hint is set for virtqueues? 
*/ > > > > > > > > > bool affinity_hint_set; > > > > > > > > > > > > > > > > > > @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi) > > > > > > > > > spin_unlock_bh(&vi->refill_lock); > > > > > > > > > } > > > > > > > > > > > > > > > > > > +static void enable_rx_mode_work(struct virtnet_info *vi) > > > > > > > > > +{ > > > > > > > > > + rtnl_lock(); > > > > > > > > > + vi->rx_mode_work_enabled = true; > > > > > > > > > + rtnl_unlock(); > > > > > > > > > +} > > > > > > > > > + > > > > > > > > > +static void disable_rx_mode_work(struct virtnet_info *vi) > > > > > > > > > +{ > > > > > > > > > + rtnl_lock(); > > > > > > > > > + vi->rx_mode_work_enabled = false; > > > > > > > > > + rtnl_unlock(); > > > > > > > > > +} > > > > > > > > > + > > > > > > > > > static void virtqueue_napi_schedule(struct napi_struct *napi, > > > > > > > > > struct virtqueue *vq) > > > > > > > > > { > > > > > > > > > @@ -2341,9 +2361,11 @@ static int virtnet_close(struct net_device *dev) > > > > > > > > > return 0; > > > > > > > > > } > > > > > > > > > > > > > > > > > > -static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > > > > +static void virtnet_rx_mode_work(struct work_struct *work) > > > > > > > > > { > > > > > > > > > - struct virtnet_info *vi = netdev_priv(dev); > > > > > > > > > + struct virtnet_info *vi = > > > > > > > > > + container_of(work, struct virtnet_info, rx_mode_work); > > > > > > > > > + struct net_device *dev = vi->dev; > > > > > > > > > struct scatterlist sg[2]; > > > > > > > > > struct virtio_net_ctrl_mac *mac_data; > > > > > > > > > struct netdev_hw_addr *ha; > > > > > > > > > @@ -2356,6 +2378,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > > > > if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) > > > > > > > > > return; > > > > > > > > > > > > > > > > > > + rtnl_lock(); > > > > > > > > > + > > > > > > > > > vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); > > > > > > > > > vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); > > > > > > > > > > > > > > > > > > @@ -2373,14 +2397,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > > > > dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", > > > > > > > > > vi->ctrl->allmulti ? 
"en" : "dis"); > > > > > > > > > > > > > > > > > > + netif_addr_lock_bh(dev); > > > > > > > > > + > > > > > > > > > uc_count = netdev_uc_count(dev); > > > > > > > > > mc_count = netdev_mc_count(dev); > > > > > > > > > /* MAC filter - use one buffer for both lists */ > > > > > > > > > buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + > > > > > > > > > (2 * sizeof(mac_data->entries)), GFP_ATOMIC); > > > > > > > > > mac_data = buf; > > > > > > > > > - if (!buf) > > > > > > > > > + if (!buf) { > > > > > > > > > + netif_addr_unlock_bh(dev); > > > > > > > > > + rtnl_unlock(); > > > > > > > > > return; > > > > > > > > > + } > > > > > > > > > > > > > > > > > > sg_init_table(sg, 2); > > > > > > > > > > > > > > > > > > @@ -2401,6 +2430,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > > > > netdev_for_each_mc_addr(ha, dev) > > > > > > > > > memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); > > > > > > > > > > > > > > > > > > + netif_addr_unlock_bh(dev); > > > > > > > > > + > > > > > > > > > sg_set_buf(&sg[1], mac_data, > > > > > > > > > sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); > > > > > > > > > > > > > > > > > > @@ -2408,9 +2439,19 @@ static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > > > > VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) > > > > > > > > > dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); > > > > > > > > > > > > > > > > > > + rtnl_unlock(); > > > > > > > > > + > > > > > > > > > kfree(buf); > > > > > > > > > } > > > > > > > > > > > > > > > > > > +static void virtnet_set_rx_mode(struct net_device *dev) > > > > > > > > > +{ > > > > > > > > > + struct virtnet_info *vi = netdev_priv(dev); > > > > > > > > > + > > > > > > > > > + if (vi->rx_mode_work_enabled) > > > > > > > > > + schedule_work(&vi->rx_mode_work); > > > > > > > > > +} > > > > > > > > > + > > > > > > > > > > > > > > > > > static int virtnet_vlan_rx_add_vid(struct net_device *dev, > > > > > > > > > __be16 proto, u16 vid) > > > > > > > > > { > > > > > > > > > @@ -3181,6 +3222,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev) > > > > > > > > > > > > > > > > > > /* Make sure no work handler is accessing the device */ > > > > > > > > > flush_work(&vi->config_work); > > > > > > > > > + disable_rx_mode_work(vi); > > > > > > > > > + flush_work(&vi->rx_mode_work); > > > > > > > > > > > > > > > > > > netif_tx_lock_bh(vi->dev); > > > > > > > > > netif_device_detach(vi->dev); > > > > > > > > > > > > > > > > Hmm so queued rx mode work will just get skipped > > > > > > > > and on restore we get a wrong rx mode. > > > > > > > > Any way to make this more robust? > > > > > > > > > > > > > > It could be done by scheduling a work on restore. > > > > > > > > > > Rethink this, I think we don't need to care about this case since the > > > > > user processes should have been frozened. > > > > > > > > Yes but not the workqueue. Want to switch to system_freezable_wq? > > > > > > Yes, I will do it in v2. > > > > Actually, this doesn't work. Freezable workqueue can only guarantee > > when being freezed the new work will be queued and not scheduled until > > thaw. So the ktrhead that is executing the workqueue is not freezable. > > The busy loop (even with cond_resched()) will force suspend in this > > case. > > > > I wonder if we should switch to using a dedicated kthread for > > virtio-net then we can allow it to be frozen. > > > > Thanks > > > > So what's the plan then? I plan to send a new version that doesn't take special care to freeze. 
The freeze case can then be addressed on top, probably with a dedicated
kthread that can be frozen and can respond to things like SIGKILL (which is
somewhat similar to what we want to solve for vhost).

Thanks

> > > Thanks
> > > 
> > > > > And that's the reason we don't
> > > > > even need to hold RTNL here.
> > > > > 
> > > > > Thanks
> > > > > > > > > 
> > > > > > > > > @@ -3203,6 +3246,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
> > > > > > > > >         virtio_device_ready(vdev);
> > > > > > > > > 
> > > > > > > > >         enable_delayed_refill(vi);
> > > > > > > > > +        enable_rx_mode_work(vi);
> > > > > > > > > 
> > > > > > > > >         if (netif_running(vi->dev)) {
> > > > > > > > >                 err = virtnet_open(vi->dev);
> > > > > > > > > @@ -4002,6 +4046,7 @@ static int virtnet_probe(struct virtio_device *vdev)
> > > > > > > > >         vdev->priv = vi;
> > > > > > > > > 
> > > > > > > > >         INIT_WORK(&vi->config_work, virtnet_config_changed_work);
> > > > > > > > > +        INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
> > > > > > > > >         spin_lock_init(&vi->refill_lock);
> > > > > > > > > 
> > > > > > > > >         if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
> > > > > > > > > @@ -4110,6 +4155,8 @@ static int virtnet_probe(struct virtio_device *vdev)
> > > > > > > > >         if (vi->has_rss || vi->has_rss_hash_report)
> > > > > > > > >                 virtnet_init_default_rss(vi);
> > > > > > > > > 
> > > > > > > > > +        enable_rx_mode_work(vi);
> > > > > > > > > +
> > > > > > > > >         /* serialize netdev register + virtio_device_ready() with ndo_open() */
> > > > > > > > >         rtnl_lock();
> > > > > > > > > 
> > > > > > > > > @@ -4207,6 +4254,8 @@ static void virtnet_remove(struct virtio_device *vdev)
> > > > > > > > > 
> > > > > > > > >         /* Make sure no work handler is accessing the device. */
> > > > > > > > >         flush_work(&vi->config_work);
> > > > > > > > > +        disable_rx_mode_work(vi);
> > > > > > > > > +        flush_work(&vi->rx_mode_work);
> > > > > > > > > 
> > > > > > > > >         unregister_netdev(vi->dev);
> > > > > > > > > 
> > > > > > > > > --
> > > > > > > > > 2.25.1
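For illustration only, and not code from this series: a minimal sketch of the
dedicated, freezable kthread idea mentioned above. All names used here
(virtnet_cvq_thread, cvq_wait, cvq_req_pending, virtnet_handle_cvq_request)
are hypothetical; the point is only that such a thread can opt in to the
freezer, unlike the kworker that executes a workqueue item.

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>

/* Hypothetical worker thread servicing deferred cvq requests. */
static int virtnet_cvq_thread(void *data)
{
        struct virtnet_info *vi = data;

        set_freezable();        /* unlike a kworker, this thread joins the freezer */

        while (!kthread_should_stop()) {
                /* Sleeps until there is work, and parks here transparently
                 * when the system freezes instead of busy polling through
                 * a suspend. */
                wait_event_freezable(vi->cvq_wait,
                                     READ_ONCE(vi->cvq_req_pending) ||
                                     kthread_should_stop());

                if (READ_ONCE(vi->cvq_req_pending))
                        virtnet_handle_cvq_request(vi);  /* hypothetical */
        }
        return 0;
}

Under these assumptions, probe could start the thread with something like
kthread_run(virtnet_cvq_thread, vi, "virtnet-cvq/%s", netdev_name(vi->dev)),
and the freeze/remove paths would wake it and let it park or stop at a safe
point rather than racing with an in-flight work item.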
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 56ca1d270304..5d2f1da4eaa0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -265,6 +265,12 @@ struct virtnet_info {
         /* Work struct for config space updates */
         struct work_struct config_work;
 
+        /* Work struct for config rx mode */
+        struct work_struct rx_mode_work;
+
+        /* Is rx mode work enabled? */
+        bool rx_mode_work_enabled;
+
         /* Does the affinity hint is set for virtqueues? */
         bool affinity_hint_set;
 
@@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
         spin_unlock_bh(&vi->refill_lock);
 }
 
+static void enable_rx_mode_work(struct virtnet_info *vi)
+{
+        rtnl_lock();
+        vi->rx_mode_work_enabled = true;
+        rtnl_unlock();
+}
+
+static void disable_rx_mode_work(struct virtnet_info *vi)
+{
+        rtnl_lock();
+        vi->rx_mode_work_enabled = false;
+        rtnl_unlock();
+}
+
 static void virtqueue_napi_schedule(struct napi_struct *napi,
                                     struct virtqueue *vq)
 {
@@ -2341,9 +2361,11 @@ static int virtnet_close(struct net_device *dev)
         return 0;
 }
 
-static void virtnet_set_rx_mode(struct net_device *dev)
+static void virtnet_rx_mode_work(struct work_struct *work)
 {
-        struct virtnet_info *vi = netdev_priv(dev);
+        struct virtnet_info *vi =
+                container_of(work, struct virtnet_info, rx_mode_work);
+        struct net_device *dev = vi->dev;
         struct scatterlist sg[2];
         struct virtio_net_ctrl_mac *mac_data;
         struct netdev_hw_addr *ha;
@@ -2356,6 +2378,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                 return;
 
+        rtnl_lock();
+
         vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
         vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
 
@@ -2373,14 +2397,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
                 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                          vi->ctrl->allmulti ? "en" : "dis");
 
+        netif_addr_lock_bh(dev);
+
         uc_count = netdev_uc_count(dev);
         mc_count = netdev_mc_count(dev);
         /* MAC filter - use one buffer for both lists */
         buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
                       (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
         mac_data = buf;
-        if (!buf)
+        if (!buf) {
+                netif_addr_unlock_bh(dev);
+                rtnl_unlock();
                 return;
+        }
 
         sg_init_table(sg, 2);
 
@@ -2401,6 +2430,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
         netdev_for_each_mc_addr(ha, dev)
                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
 
+        netif_addr_unlock_bh(dev);
+
         sg_set_buf(&sg[1], mac_data,
                    sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
 
@@ -2408,9 +2439,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
                                   VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
                 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
 
+        rtnl_unlock();
+
         kfree(buf);
 }
 
+static void virtnet_set_rx_mode(struct net_device *dev)
+{
+        struct virtnet_info *vi = netdev_priv(dev);
+
+        if (vi->rx_mode_work_enabled)
+                schedule_work(&vi->rx_mode_work);
+}
+
 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
                                    __be16 proto, u16 vid)
 {
@@ -3181,6 +3222,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
 
         /* Make sure no work handler is accessing the device */
         flush_work(&vi->config_work);
+        disable_rx_mode_work(vi);
+        flush_work(&vi->rx_mode_work);
 
         netif_tx_lock_bh(vi->dev);
         netif_device_detach(vi->dev);
@@ -3203,6 +3246,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
         virtio_device_ready(vdev);
 
         enable_delayed_refill(vi);
+        enable_rx_mode_work(vi);
 
         if (netif_running(vi->dev)) {
                 err = virtnet_open(vi->dev);
@@ -4002,6 +4046,7 @@ static int virtnet_probe(struct virtio_device *vdev)
         vdev->priv = vi;
 
         INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+        INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
         spin_lock_init(&vi->refill_lock);
 
         if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
@@ -4110,6 +4155,8 @@ static int virtnet_probe(struct virtio_device *vdev)
         if (vi->has_rss || vi->has_rss_hash_report)
                 virtnet_init_default_rss(vi);
 
+        enable_rx_mode_work(vi);
+
         /* serialize netdev register + virtio_device_ready() with ndo_open() */
         rtnl_lock();
 
@@ -4207,6 +4254,8 @@ static void virtnet_remove(struct virtio_device *vdev)
 
         /* Make sure no work handler is accessing the device. */
         flush_work(&vi->config_work);
+        disable_rx_mode_work(vi);
+        flush_work(&vi->rx_mode_work);
 
         unregister_netdev(vi->dev);
This patch converts rx mode setting to be done in a workqueue. This is a must
to allow sleeping while waiting for the cvq command to complete, since the
current code is executed under the addr spin lock.

Signed-off-by: Jason Wang <jasowang@redhat.com>
---
Changes since V1:
- use RTNL to synchronize rx mode worker
---
 drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++---
 1 file changed, 52 insertions(+), 3 deletions(-)
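For context on the "addr spin lock" constraint the commit message refers to:
the core stack invokes ndo_set_rx_mode() with the device address-list lock
held and bottom halves disabled, roughly as follows (paraphrasing
net/core/dev.c, shown only to illustrate the calling context). That is why
the handler itself must not sleep and why the cvq wait has to move out into a
workqueue.

void dev_set_rx_mode(struct net_device *dev)
{
        netif_addr_lock_bh(dev);        /* spinlock, BHs disabled */
        __dev_set_rx_mode(dev);         /* ends up in ndo_set_rx_mode() */
        netif_addr_unlock_bh(dev);
}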