
[v11,39/40] virtio_net: support tx queue resize

Message ID 20220629065656.54420-40-xuanzhuo@linux.alibaba.com (mailing list archive)
State New, archived
Series: virtio pci support VIRTIO_F_RING_RESET

Commit Message

Xuan Zhuo June 29, 2022, 6:56 a.m. UTC
This patch implements the resize function for the tx queues. Based on
this function, it is possible to modify the ring num of a queue.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 48 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

Comments

Jason Wang July 4, 2022, 3:45 a.m. UTC | #1
On 2022/6/29 14:56, Xuan Zhuo wrote:
> This patch implements the resize function for the tx queues. Based on
> this function, it is possible to modify the ring num of a queue.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
>   drivers/net/virtio_net.c | 48 ++++++++++++++++++++++++++++++++++++++++
>   1 file changed, 48 insertions(+)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 6ab16fd193e5..fd358462f802 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -135,6 +135,9 @@ struct send_queue {
>   	struct virtnet_sq_stats stats;
>   
>   	struct napi_struct napi;
> +
> +	/* Record whether sq is in reset state. */
> +	bool reset;
>   };
>   
>   /* Internal representation of a receive virtqueue */
> @@ -279,6 +282,7 @@ struct padded_vnet_hdr {
>   };
>   
>   static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> +static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
>   
>   static bool is_xdp_frame(void *ptr)
>   {
> @@ -1603,6 +1607,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
>   		return;
>   
>   	if (__netif_tx_trylock(txq)) {
> +		if (READ_ONCE(sq->reset)) {
> +			__netif_tx_unlock(txq);
> +			return;
> +		}
> +
>   		do {
>   			virtqueue_disable_cb(sq->vq);
>   			free_old_xmit_skbs(sq, true);
> @@ -1868,6 +1877,45 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
>   	return err;
>   }
>   
> +static int virtnet_tx_resize(struct virtnet_info *vi,
> +			     struct send_queue *sq, u32 ring_num)
> +{
> +	struct netdev_queue *txq;
> +	int err, qindex;
> +
> +	qindex = sq - vi->sq;
> +
> +	virtnet_napi_tx_disable(&sq->napi);
> +
> +	txq = netdev_get_tx_queue(vi->dev, qindex);
> +
> +	/* 1. wait for all xmit to complete
> +	 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
> +	 */
> +	__netif_tx_lock_bh(txq);
> +
> +	/* Prevent rx poll from accessing sq. */
> +	WRITE_ONCE(sq->reset, true);


Can we simply disable RX NAPI here?

Thanks


> +
> +	/* Prevent the upper layer from trying to send packets. */
> +	netif_stop_subqueue(vi->dev, qindex);
> +
> +	__netif_tx_unlock_bh(txq);
> +
> +	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
> +	if (err)
> +		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
> +
> +	/* Memory barrier before clearing reset and waking the subqueue. */
> +	smp_mb();
> +
> +	WRITE_ONCE(sq->reset, false);
> +	netif_tx_wake_queue(txq);
> +
> +	virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
> +	return err;
> +}
> +
>   /*
>    * Send command via the control virtqueue and check status.  Commands
>    * supported by the hypervisor, as indicated by feature bits, should
Xuan Zhuo July 5, 2022, 2 a.m. UTC | #2
On Mon, 4 Jul 2022 11:45:52 +0800, Jason Wang <jasowang@redhat.com> wrote:
>
> On 2022/6/29 14:56, Xuan Zhuo wrote:
> > This patch implements the resize function for the tx queues. Based on
> > this function, it is possible to modify the ring num of a queue.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >   drivers/net/virtio_net.c | 48 ++++++++++++++++++++++++++++++++++++++++
> >   1 file changed, 48 insertions(+)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 6ab16fd193e5..fd358462f802 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -135,6 +135,9 @@ struct send_queue {
> >   	struct virtnet_sq_stats stats;
> >
> >   	struct napi_struct napi;
> > +
> > +	/* Record whether sq is in reset state. */
> > +	bool reset;
> >   };
> >
> >   /* Internal representation of a receive virtqueue */
> > @@ -279,6 +282,7 @@ struct padded_vnet_hdr {
> >   };
> >
> >   static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > +static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> >
> >   static bool is_xdp_frame(void *ptr)
> >   {
> > @@ -1603,6 +1607,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
> >   		return;
> >
> >   	if (__netif_tx_trylock(txq)) {
> > +		if (READ_ONCE(sq->reset)) {
> > +			__netif_tx_unlock(txq);
> > +			return;
> > +		}
> > +
> >   		do {
> >   			virtqueue_disable_cb(sq->vq);
> >   			free_old_xmit_skbs(sq, true);
> > @@ -1868,6 +1877,45 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
> >   	return err;
> >   }
> >
> > +static int virtnet_tx_resize(struct virtnet_info *vi,
> > +			     struct send_queue *sq, u32 ring_num)
> > +{
> > +	struct netdev_queue *txq;
> > +	int err, qindex;
> > +
> > +	qindex = sq - vi->sq;
> > +
> > +	virtnet_napi_tx_disable(&sq->napi);
> > +
> > +	txq = netdev_get_tx_queue(vi->dev, qindex);
> > +
> > +	/* 1. wait for all xmit to complete
> > +	 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
> > +	 */
> > +	__netif_tx_lock_bh(txq);
> > +
> > +	/* Prevent rx poll from accessing sq. */
> > +	WRITE_ONCE(sq->reset, true);
>
>
> Can we simply disable RX NAPI here?

Disabling rx napi is indeed a simple solution, but I hope that handling
tx will not affect rx.

Thanks.


>
> Thanks
>
>
> > +
> > +	/* Prevent the upper layer from trying to send packets. */
> > +	netif_stop_subqueue(vi->dev, qindex);
> > +
> > +	__netif_tx_unlock_bh(txq);
> > +
> > +	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
> > +	if (err)
> > +		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
> > +
> > +	/* Memory barrier before clearing reset and waking the subqueue. */
> > +	smp_mb();
> > +
> > +	WRITE_ONCE(sq->reset, false);
> > +	netif_tx_wake_queue(txq);
> > +
> > +	virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
> > +	return err;
> > +}
> > +
> >   /*
> >    * Send command via the control virtqueue and check status.  Commands
> >    * supported by the hypervisor, as indicated by feature bits, should
>
Jason Wang July 8, 2022, 6:23 a.m. UTC | #3
On Tue, Jul 5, 2022 at 10:01 AM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Mon, 4 Jul 2022 11:45:52 +0800, Jason Wang <jasowang@redhat.com> wrote:
> >
> > On 2022/6/29 14:56, Xuan Zhuo wrote:
> > > This patch implements the resize function for the tx queues. Based on
> > > this function, it is possible to modify the ring num of a queue.
> > >
> > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > ---
> > >   drivers/net/virtio_net.c | 48 ++++++++++++++++++++++++++++++++++++++++
> > >   1 file changed, 48 insertions(+)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index 6ab16fd193e5..fd358462f802 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -135,6 +135,9 @@ struct send_queue {
> > >     struct virtnet_sq_stats stats;
> > >
> > >     struct napi_struct napi;
> > > +
> > > +   /* Record whether sq is in reset state. */
> > > +   bool reset;
> > >   };
> > >
> > >   /* Internal representation of a receive virtqueue */
> > > @@ -279,6 +282,7 @@ struct padded_vnet_hdr {
> > >   };
> > >
> > >   static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > +static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > >
> > >   static bool is_xdp_frame(void *ptr)
> > >   {
> > > @@ -1603,6 +1607,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
> > >             return;
> > >
> > >     if (__netif_tx_trylock(txq)) {
> > > +           if (READ_ONCE(sq->reset)) {
> > > +                   __netif_tx_unlock(txq);
> > > +                   return;
> > > +           }
> > > +
> > >             do {
> > >                     virtqueue_disable_cb(sq->vq);
> > >                     free_old_xmit_skbs(sq, true);
> > > @@ -1868,6 +1877,45 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
> > >     return err;
> > >   }
> > >
> > > +static int virtnet_tx_resize(struct virtnet_info *vi,
> > > +                        struct send_queue *sq, u32 ring_num)
> > > +{
> > > +   struct netdev_queue *txq;
> > > +   int err, qindex;
> > > +
> > > +   qindex = sq - vi->sq;
> > > +
> > > +   virtnet_napi_tx_disable(&sq->napi);
> > > +
> > > +   txq = netdev_get_tx_queue(vi->dev, qindex);
> > > +
> > > +   /* 1. wait for all xmit to complete
> > > +    * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
> > > +    */
> > > +   __netif_tx_lock_bh(txq);
> > > +
> > > +   /* Prevent rx poll from accessing sq. */
> > > +   WRITE_ONCE(sq->reset, true);
> >
> >
> > Can we simply disable RX NAPI here?
>
> > Disabling rx napi is indeed a simple solution, but I hope that handling
> > tx will not affect rx.

Ok, but I think we've already synchronized with the tx lock here, haven't we?

Thanks

>
> Thanks.
>
>
> >
> > Thanks
> >
> >
> > > +
> > > +   /* Prevent the upper layer from trying to send packets. */
> > > +   netif_stop_subqueue(vi->dev, qindex);
> > > +
> > > +   __netif_tx_unlock_bh(txq);
> > > +
> > > +   err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
> > > +   if (err)
> > > +           netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
> > > +
> > > +   /* Memory barrier before clearing reset and waking the subqueue. */
> > > +   smp_mb();
> > > +
> > > +   WRITE_ONCE(sq->reset, false);
> > > +   netif_tx_wake_queue(txq);
> > > +
> > > +   virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
> > > +   return err;
> > > +}
> > > +
> > >   /*
> > >    * Send command via the control virtqueue and check status.  Commands
> > >    * supported by the hypervisor, as indicated by feature bits, should
> >
>
Xuan Zhuo July 15, 2022, 8:28 a.m. UTC | #4
On Fri, 8 Jul 2022 14:23:57 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Tue, Jul 5, 2022 at 10:01 AM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Mon, 4 Jul 2022 11:45:52 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > >
> > > On 2022/6/29 14:56, Xuan Zhuo wrote:
> > > > This patch implements the resize function for the tx queues. Based on
> > > > this function, it is possible to modify the ring num of a queue.
> > > >
> > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > ---
> > > >   drivers/net/virtio_net.c | 48 ++++++++++++++++++++++++++++++++++++++++
> > > >   1 file changed, 48 insertions(+)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index 6ab16fd193e5..fd358462f802 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -135,6 +135,9 @@ struct send_queue {
> > > >     struct virtnet_sq_stats stats;
> > > >
> > > >     struct napi_struct napi;
> > > > +
> > > > +   /* Record whether sq is in reset state. */
> > > > +   bool reset;
> > > >   };
> > > >
> > > >   /* Internal representation of a receive virtqueue */
> > > > @@ -279,6 +282,7 @@ struct padded_vnet_hdr {
> > > >   };
> > > >
> > > >   static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > +static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > >
> > > >   static bool is_xdp_frame(void *ptr)
> > > >   {
> > > > @@ -1603,6 +1607,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
> > > >             return;
> > > >
> > > >     if (__netif_tx_trylock(txq)) {
> > > > +           if (READ_ONCE(sq->reset)) {
> > > > +                   __netif_tx_unlock(txq);
> > > > +                   return;
> > > > +           }
> > > > +
> > > >             do {
> > > >                     virtqueue_disable_cb(sq->vq);
> > > >                     free_old_xmit_skbs(sq, true);
> > > > @@ -1868,6 +1877,45 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
> > > >     return err;
> > > >   }
> > > >
> > > > +static int virtnet_tx_resize(struct virtnet_info *vi,
> > > > +                        struct send_queue *sq, u32 ring_num)
> > > > +{
> > > > +   struct netdev_queue *txq;
> > > > +   int err, qindex;
> > > > +
> > > > +   qindex = sq - vi->sq;
> > > > +
> > > > +   virtnet_napi_tx_disable(&sq->napi);
> > > > +
> > > > +   txq = netdev_get_tx_queue(vi->dev, qindex);
> > > > +
> > > > +   /* 1. wait for all xmit to complete
> > > > +    * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
> > > > +    */
> > > > +   __netif_tx_lock_bh(txq);
> > > > +
> > > > +   /* Prevent rx poll from accessing sq. */
> > > > +   WRITE_ONCE(sq->reset, true);
> > >
> > >
> > > Can we simply disable RX NAPI here?
> >
> > Disabling rx napi is indeed a simple solution, but I hope that handling
> > tx will not affect rx.
>
> Ok, but I think we've already synchronized with the tx lock here, haven't we?

Yes. Do you have any concerns about WRITE_ONCE()? There is a set-to-false
operation later where I did not take the lock, so I used
WRITE_ONCE()/READ_ONCE() uniformly.

Thanks.

>
> Thanks
>
> >
> > Thanks.
> >
> >
> > >
> > > Thanks
> > >
> > >
> > > > +
> > > > +   /* Prevent the upper layer from trying to send packets. */
> > > > +   netif_stop_subqueue(vi->dev, qindex);
> > > > +
> > > > +   __netif_tx_unlock_bh(txq);
> > > > +
> > > > +   err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
> > > > +   if (err)
> > > > +           netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
> > > > +
> > > > +   /* Memory barrier before clearing reset and waking the subqueue. */
> > > > +   smp_mb();
> > > > +
> > > > +   WRITE_ONCE(sq->reset, false);
> > > > +   netif_tx_wake_queue(txq);
> > > > +
> > > > +   virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
> > > > +   return err;
> > > > +}
> > > > +
> > > >   /*
> > > >    * Send command via the control virtqueue and check status.  Commands
> > > >    * supported by the hypervisor, as indicated by feature bits, should
> > >
> >
>
Jason Wang July 18, 2022, 8:57 a.m. UTC | #5
On Fri, Jul 15, 2022 at 4:32 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Fri, 8 Jul 2022 14:23:57 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Tue, Jul 5, 2022 at 10:01 AM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > On Mon, 4 Jul 2022 11:45:52 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > >
> > > > On 2022/6/29 14:56, Xuan Zhuo wrote:
> > > > > This patch implements the resize function for the tx queues. Based on
> > > > > this function, it is possible to modify the ring num of a queue.
> > > > >
> > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > > ---
> > > > >   drivers/net/virtio_net.c | 48 ++++++++++++++++++++++++++++++++++++++++
> > > > >   1 file changed, 48 insertions(+)
> > > > >
> > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > index 6ab16fd193e5..fd358462f802 100644
> > > > > --- a/drivers/net/virtio_net.c
> > > > > +++ b/drivers/net/virtio_net.c
> > > > > @@ -135,6 +135,9 @@ struct send_queue {
> > > > >     struct virtnet_sq_stats stats;
> > > > >
> > > > >     struct napi_struct napi;
> > > > > +
> > > > > +   /* Record whether sq is in reset state. */
> > > > > +   bool reset;
> > > > >   };
> > > > >
> > > > >   /* Internal representation of a receive virtqueue */
> > > > > @@ -279,6 +282,7 @@ struct padded_vnet_hdr {
> > > > >   };
> > > > >
> > > > >   static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > +static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > >
> > > > >   static bool is_xdp_frame(void *ptr)
> > > > >   {
> > > > > @@ -1603,6 +1607,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
> > > > >             return;
> > > > >
> > > > >     if (__netif_tx_trylock(txq)) {
> > > > > +           if (READ_ONCE(sq->reset)) {
> > > > > +                   __netif_tx_unlock(txq);
> > > > > +                   return;
> > > > > +           }
> > > > > +
> > > > >             do {
> > > > >                     virtqueue_disable_cb(sq->vq);
> > > > >                     free_old_xmit_skbs(sq, true);
> > > > > @@ -1868,6 +1877,45 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
> > > > >     return err;
> > > > >   }
> > > > >
> > > > > +static int virtnet_tx_resize(struct virtnet_info *vi,
> > > > > +                        struct send_queue *sq, u32 ring_num)
> > > > > +{
> > > > > +   struct netdev_queue *txq;
> > > > > +   int err, qindex;
> > > > > +
> > > > > +   qindex = sq - vi->sq;
> > > > > +
> > > > > +   virtnet_napi_tx_disable(&sq->napi);
> > > > > +
> > > > > +   txq = netdev_get_tx_queue(vi->dev, qindex);
> > > > > +
> > > > > +   /* 1. wait for all xmit to complete
> > > > > +    * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
> > > > > +    */
> > > > > +   __netif_tx_lock_bh(txq);
> > > > > +
> > > > > +   /* Prevent rx poll from accessing sq. */
> > > > > +   WRITE_ONCE(sq->reset, true);
> > > >
> > > >
> > > > Can we simply disable RX NAPI here?
> > >
> > > Disabling rx napi is indeed a simple solution, but I hope that handling
> > > tx will not affect rx.
> >
> > Ok, but I think we've already synchronized with the tx lock here, haven't we?
>
> Yes. Do you have any concerns about WRITE_ONCE()? There is a set-to-false
> operation later where I did not take the lock, so I used
> WRITE_ONCE()/READ_ONCE() uniformly.

I mean, since we've already used tx locks elsewhere, we'd better use
them here as well, at least as a start.

Thanks

>
> Thanks.
>
> >
> > Thanks
> >
> > >
> > > Thanks.
> > >
> > >
> > > >
> > > > Thanks
> > > >
> > > >
> > > > > +
> > > > > +   /* Prevent the upper layer from trying to send packets. */
> > > > > +   netif_stop_subqueue(vi->dev, qindex);
> > > > > +
> > > > > +   __netif_tx_unlock_bh(txq);
> > > > > +
> > > > > +   err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
> > > > > +   if (err)
> > > > > +           netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
> > > > > +
> > > > > +   /* Memory barrier before clearing reset and waking the subqueue. */
> > > > > +   smp_mb();
> > > > > +
> > > > > +   WRITE_ONCE(sq->reset, false);
> > > > > +   netif_tx_wake_queue(txq);
> > > > > +
> > > > > +   virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
> > > > > +   return err;
> > > > > +}
> > > > > +
> > > > >   /*
> > > > >    * Send command via the control virtqueue and check status.  Commands
> > > > >    * supported by the hypervisor, as indicated by feature bits, should
> > > >
> > >
> >
>
Xuan Zhuo July 18, 2022, 9:11 a.m. UTC | #6
On Mon, 18 Jul 2022 16:57:53 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Fri, Jul 15, 2022 at 4:32 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Fri, 8 Jul 2022 14:23:57 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Tue, Jul 5, 2022 at 10:01 AM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > On Mon, 4 Jul 2022 11:45:52 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > >
> > > > > On 2022/6/29 14:56, Xuan Zhuo wrote:
> > > > > > This patch implements the resize function for the tx queues. Based on
> > > > > > this function, it is possible to modify the ring num of a queue.
> > > > > >
> > > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > > > ---
> > > > > >   drivers/net/virtio_net.c | 48 ++++++++++++++++++++++++++++++++++++++++
> > > > > >   1 file changed, 48 insertions(+)
> > > > > >
> > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > > index 6ab16fd193e5..fd358462f802 100644
> > > > > > --- a/drivers/net/virtio_net.c
> > > > > > +++ b/drivers/net/virtio_net.c
> > > > > > @@ -135,6 +135,9 @@ struct send_queue {
> > > > > >     struct virtnet_sq_stats stats;
> > > > > >
> > > > > >     struct napi_struct napi;
> > > > > > +
> > > > > > +   /* Record whether sq is in reset state. */
> > > > > > +   bool reset;
> > > > > >   };
> > > > > >
> > > > > >   /* Internal representation of a receive virtqueue */
> > > > > > @@ -279,6 +282,7 @@ struct padded_vnet_hdr {
> > > > > >   };
> > > > > >
> > > > > >   static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > > +static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > >
> > > > > >   static bool is_xdp_frame(void *ptr)
> > > > > >   {
> > > > > > @@ -1603,6 +1607,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
> > > > > >             return;
> > > > > >
> > > > > >     if (__netif_tx_trylock(txq)) {
> > > > > > +           if (READ_ONCE(sq->reset)) {
> > > > > > +                   __netif_tx_unlock(txq);
> > > > > > +                   return;
> > > > > > +           }
> > > > > > +
> > > > > >             do {
> > > > > >                     virtqueue_disable_cb(sq->vq);
> > > > > >                     free_old_xmit_skbs(sq, true);
> > > > > > @@ -1868,6 +1877,45 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
> > > > > >     return err;
> > > > > >   }
> > > > > >
> > > > > > +static int virtnet_tx_resize(struct virtnet_info *vi,
> > > > > > +                        struct send_queue *sq, u32 ring_num)
> > > > > > +{
> > > > > > +   struct netdev_queue *txq;
> > > > > > +   int err, qindex;
> > > > > > +
> > > > > > +   qindex = sq - vi->sq;
> > > > > > +
> > > > > > +   virtnet_napi_tx_disable(&sq->napi);
> > > > > > +
> > > > > > +   txq = netdev_get_tx_queue(vi->dev, qindex);
> > > > > > +
> > > > > > +   /* 1. wait for all xmit to complete
> > > > > > +    * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
> > > > > > +    */
> > > > > > +   __netif_tx_lock_bh(txq);
> > > > > > +
> > > > > > +   /* Prevent rx poll from accessing sq. */
> > > > > > +   WRITE_ONCE(sq->reset, true);
> > > > >
> > > > >
> > > > > Can we simply disable RX NAPI here?
> > > >
> > > > Disabling rx napi is indeed a simple solution, but I hope that handling
> > > > tx will not affect rx.
> > >
> > > Ok, but I think we've already synchronized with the tx lock here, haven't we?
> >
> > Yes. Do you have any concerns about WRITE_ONCE()? There is a set-to-false
> > operation later where I did not take the lock, so I used
> > WRITE_ONCE()/READ_ONCE() uniformly.
>
> I mean, since we've already used tx locks elsewhere, we'd better use
> them here as well, at least as a start.


OK, the next version will fix this.

Thanks.

+static int virtnet_tx_resize(struct virtnet_info *vi,
+			     struct send_queue *sq, u32 ring_num)
+{
+	struct netdev_queue *txq;
+	int err, qindex;
+
+	qindex = sq - vi->sq;
+
+	virtnet_napi_tx_disable(&sq->napi);
+
+	txq = netdev_get_tx_queue(vi->dev, qindex);
+
+	/* 1. wait for all xmit to complete
+	 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
+	 */
+	__netif_tx_lock_bh(txq);
+
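+	/* With the tx lock held on both sides, plain stores to sq->reset
+	 * are enough: virtnet_poll_cleantx() only checks the flag after
+	 * taking the same lock.
+	 */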
+	sq->reset = true;
+
+	/* Prevent the upper layer from trying to send packets. */
+	netif_stop_subqueue(vi->dev, qindex);
+
+	__netif_tx_unlock_bh(txq);
+
+	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+	if (err)
+		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
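+	/* The queue is woken below even on error: on allocation failure
+	 * virtqueue_resize() falls back to the original ring, so the vq
+	 * normally remains usable.
+	 */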
+	__netif_tx_lock_bh(txq);
+	sq->reset = false;
+	netif_tx_wake_queue(txq);
+	__netif_tx_unlock_bh(txq);
+
+	virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+	return err;
+}


>
> Thanks
>
> >
> > Thanks.
> >
> > >
> > > Thanks
> > >
> > > >
> > > > Thanks.
> > > >
> > > >
> > > > >
> > > > > Thanks
> > > > >
> > > > >
> > > > > > +
> > > > > > +   /* Prevent the upper layer from trying to send packets. */
> > > > > > +   netif_stop_subqueue(vi->dev, qindex);
> > > > > > +
> > > > > > +   __netif_tx_unlock_bh(txq);
> > > > > > +
> > > > > > +   err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
> > > > > > +   if (err)
> > > > > > +           netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
> > > > > > +
> > > > > > +   /* Memory barrier before clearing reset and waking the subqueue. */
> > > > > > +   smp_mb();
> > > > > > +
> > > > > > +   WRITE_ONCE(sq->reset, false);
> > > > > > +   netif_tx_wake_queue(txq);
> > > > > > +
> > > > > > +   virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
> > > > > > +   return err;
> > > > > > +}
> > > > > > +
> > > > > >   /*
> > > > > >    * Send command via the control virtqueue and check status.  Commands
> > > > > >    * supported by the hypervisor, as indicated by feature bits, should
> > > > >
> > > >
> > >
> >
>

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6ab16fd193e5..fd358462f802 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -135,6 +135,9 @@  struct send_queue {
 	struct virtnet_sq_stats stats;
 
 	struct napi_struct napi;
+
+	/* Record whether sq is in reset state. */
+	bool reset;
 };
 
 /* Internal representation of a receive virtqueue */
@@ -279,6 +282,7 @@  struct padded_vnet_hdr {
 };
 
 static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
+static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 
 static bool is_xdp_frame(void *ptr)
 {
@@ -1603,6 +1607,11 @@  static void virtnet_poll_cleantx(struct receive_queue *rq)
 		return;
 
 	if (__netif_tx_trylock(txq)) {
+		if (READ_ONCE(sq->reset)) {
+			__netif_tx_unlock(txq);
+			return;
+		}
+
 		do {
 			virtqueue_disable_cb(sq->vq);
 			free_old_xmit_skbs(sq, true);
@@ -1868,6 +1877,45 @@  static int virtnet_rx_resize(struct virtnet_info *vi,
 	return err;
 }
 
+static int virtnet_tx_resize(struct virtnet_info *vi,
+			     struct send_queue *sq, u32 ring_num)
+{
+	struct netdev_queue *txq;
+	int err, qindex;
+
+	qindex = sq - vi->sq;
+
+	virtnet_napi_tx_disable(&sq->napi);
+
+	txq = netdev_get_tx_queue(vi->dev, qindex);
+
+	/* 1. wait for all xmit to complete
+	 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
+	 */
+	__netif_tx_lock_bh(txq);
+
+	/* Prevent rx poll from accessing sq. */
+	WRITE_ONCE(sq->reset, true);
+
+	/* Prevent the upper layer from trying to send packets. */
+	netif_stop_subqueue(vi->dev, qindex);
+
+	__netif_tx_unlock_bh(txq);
+
+	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+	if (err)
+		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
+	/* Memory barrier before clearing reset and waking the subqueue. */
+	smp_mb();
+
+	WRITE_ONCE(sq->reset, false);
+	netif_tx_wake_queue(txq);
+
+	virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+	return err;
+}
+
 /*
  * Send command via the control virtqueue and check status.  Commands
  * supported by the hypervisor, as indicated by feature bits, should
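
For context, below is a minimal sketch of how a caller might drive this
resize path. It is an illustration only: the user-visible wiring (e.g.
via ethtool's .set_ringparam) is handled elsewhere in this series, and
the helper name virtnet_resize_all_tx() is hypothetical, not part of
this patch.

static int virtnet_resize_all_tx(struct virtnet_info *vi, u32 ring_num)
{
	int i, err;

	/* Assumes the caller holds rtnl_lock (as ethtool ops do), so the
	 * set of queues cannot change while we iterate.
	 */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		err = virtnet_tx_resize(vi, &vi->sq[i], ring_num);
		if (err)
			return err;
	}

	return 0;
}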