
[net-next,v5,08/15] virtio_net: sq support premapped mode

Message ID 20240614063933.108811-9-xuanzhuo@linux.alibaba.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series virtio-net: support AF_XDP zero copy

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 845 this patch: 847
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 14 of 14 maintainers
netdev/build_clang success Errors and warnings before: 849 this patch: 849
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 849 this patch: 851
netdev/checkpatch warning WARNING: Do not crash the kernel unless it is absolutely unavoidable--use WARN_ON_ONCE() plus recovery code (if feasible) instead of BUG() or variants
    WARNING: line length of 81 exceeds 80 columns
    WARNING: line length of 83 exceeds 80 columns
    WARNING: line length of 89 exceeds 80 columns
    WARNING: line length of 91 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Xuan Zhuo June 14, 2024, 6:39 a.m. UTC
If xsk is enabled, the xsk tx path will share the send queue. But xsk
requires that the send queue use premapped mode, so the send queue must
support premapped mode when it is bound to af-xdp.

* virtnet_sq_set_premapped(sq, true) is used to enable premapped mode.

    In this mode, the driver records the dma info when an skb or xdp
    frame is sent.

    Currently, the SQ premapped mode is operational only with af-xdp. In
    this mode, af-xdp, the kernel stack, and xdp tx/redirect all share the
    same SQ. Af-xdp manages its own DMA; the kernel stack and xdp
    tx/redirect use this DMA metadata to manage their DMA info.

    If the indirect descriptor feature is supported, the volume of DMA
    details we need to maintain becomes quite substantial, so we cap the
    amount of DMA info we manage (at twice the ring size; see
    virtnet_sq_alloc_dma_meta()).

    If the kernel stack and xdp tx/redirect attempt to use more
    descriptors, virtnet_add_outbuf() will return an -ENOMEM error. But
    af-xdp can continue to work.

* virtnet_sq_set_premapped(sq, false) is used to disable premapped mode.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 228 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 224 insertions(+), 4 deletions(-)
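
For illustration, here is a minimal sketch of how a later patch in this
series might call virtnet_sq_set_premapped() when binding/unbinding an
xsk pool. The callers below are hypothetical; in this patch the function
is still __maybe_unused:

	/* Hypothetical caller: enable premapped mode when binding af-xdp.
	 * Per the comment on virtnet_sq_set_premapped(), this must run
	 * right after vq creation/reset and before any buffer is added.
	 */
	static int virtnet_sq_xsk_bind(struct send_queue *sq)
	{
		int err;

		err = virtnet_sq_set_premapped(sq, true);
		if (err)
			return err;	/* dma metadata allocation failed */

		/* ... set up the xsk pool on this queue ... */
		return 0;
	}

	static void virtnet_sq_xsk_unbind(struct send_queue *sq)
	{
		/* ... tear down the xsk pool ... */
		virtnet_sq_set_premapped(sq, false);	/* frees dma metadata */
	}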

Comments

Jason Wang June 17, 2024, 5 a.m. UTC | #1
On Fri, Jun 14, 2024 at 2:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> If the xsk is enabling, the xsk tx will share the send queue.
> But the xsk requires that the send queue use the premapped mode.
> So the send queue must support premapped mode when it is bound to
> af-xdp.
>
> * virtnet_sq_set_premapped(sq, true) is used to enable premapped mode.
>
>     In this mode, the driver will record the dma info when skb or xdp
>     frame is sent.
>
>     Currently, the SQ premapped mode is operational only with af-xdp. In
>     this mode, af-xdp, the kernel stack, and xdp tx/redirect will share
>     the same SQ. Af-xdp independently manages its DMA. The kernel stack
>     and xdp tx/redirect utilize this DMA metadata to manage the DMA
>     info.
>
>     If the indirect descriptor feature be supported, the volume of DMA
>     details we need to maintain becomes quite substantial. Here, we have
>     a cap on the amount of DMA info we manage.
>
>     If the kernel stack and xdp tx/redirect attempt to use more
>     descriptors, virtnet_add_outbuf() will return an -ENOMEM error. But
>     the af-xdp can work continually.

Rethinking this whole logic, it looks like all the complication comes
from the decision to go with a per-queue premapping flag. I wonder if
things could be simplified if we did that per buffer?

Then we don't need complex logic like dmainfo and cap.
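
A rough sketch of that per-buffer direction, for illustration
(virtqueue_add_outbuf_premapped() is hypothetical here; no such core
API exists at this point in the discussion):

	/* Hypothetical: the caller tells the core, per submission, whether
	 * the scatterlist is already DMA mapped, so af-xdp and the skb/xdp
	 * paths can mix freely without a per-queue mode switch.
	 */
	static int virtnet_add_outbuf(struct send_queue *sq, int num, void *data,
				      enum virtnet_xmit_type type, bool premapped)
	{
		data = virtnet_xmit_ptr_mix(data, type);

		if (premapped)
			/* sg[i].dma_address is already valid; the core skips
			 * its own dma map/unmap for this buffer.
			 */
			return virtqueue_add_outbuf_premapped(sq->vq, sq->sg,
							      num, data,
							      GFP_ATOMIC);

		return virtqueue_add_outbuf(sq->vq, sq->sg, num, data, GFP_ATOMIC);
	}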

>
> * virtnet_sq_set_premapped(sq, false) is used to disable premapped mode.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
>  drivers/net/virtio_net.c | 228 ++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 224 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index e84a4624549b..88ab9ea1646f 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -25,6 +25,7 @@
>  #include <net/net_failover.h>
>  #include <net/netdev_rx_queue.h>
>  #include <net/netdev_queues.h>
> +#include <uapi/linux/virtio_ring.h>

Why do we need this?

>
>  static int napi_weight = NAPI_POLL_WEIGHT;
>  module_param(napi_weight, int, 0444);
> @@ -276,6 +277,26 @@ struct virtnet_rq_dma {
>         u16 need_sync;
>  };
>
> +struct virtnet_sq_dma {
> +       union {
> +               struct llist_node node;
> +               struct llist_head head;

If we want to cap the #dmas, could we simply use an array instead of
the list here? (See the sketch after the quoted structs below.)

> +               void *data;
> +       };
> +       dma_addr_t addr;
> +       u32 len;
> +       u8 num;
> +};
> +
> +struct virtnet_sq_dma_info {
> +       /* record for kfree */
> +       void *p;
> +
> +       u32 free_num;
> +
> +       struct llist_head free;
> +};
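
A sketch of the array-based alternative (illustrative only, not part of
this patch): a fixed entry array plus a stack of free indexes replaces
the llist, and the array length is the cap:

	struct virtnet_sq_dma_info {
		struct virtnet_sq_dma *entries;	/* kvcalloc'ed, "cap" entries */
		u32 *free_idx;			/* stack of free entry indexes */
		u32 free_num;
	};

	static struct virtnet_sq_dma *virtnet_sq_dma_get(struct virtnet_sq_dma_info *info)
	{
		if (!info->free_num)
			return NULL;

		return &info->entries[info->free_idx[--info->free_num]];
	}

	static void virtnet_sq_dma_put(struct virtnet_sq_dma_info *info,
				       struct virtnet_sq_dma *d)
	{
		info->free_idx[info->free_num++] = d - info->entries;
	}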
> +
>  /* Internal representation of a send virtqueue */
>  struct send_queue {
>         /* Virtqueue associated with this send _queue */
> @@ -295,6 +316,11 @@ struct send_queue {
>
>         /* Record whether sq is in reset state. */
>         bool reset;
> +
> +       /* SQ is premapped mode or not. */
> +       bool premapped;
> +
> +       struct virtnet_sq_dma_info dmainfo;
>  };
>
>  /* Internal representation of a receive virtqueue */
> @@ -492,9 +518,11 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
>  enum virtnet_xmit_type {
>         VIRTNET_XMIT_TYPE_SKB,
>         VIRTNET_XMIT_TYPE_XDP,
> +       VIRTNET_XMIT_TYPE_DMA,

I think the name is confusing, how about TYPE_PREMAPPED?

>  };
>
> -#define VIRTNET_XMIT_TYPE_MASK (VIRTNET_XMIT_TYPE_SKB | VIRTNET_XMIT_TYPE_XDP)
> +#define VIRTNET_XMIT_TYPE_MASK (VIRTNET_XMIT_TYPE_SKB | VIRTNET_XMIT_TYPE_XDP \
> +                               | VIRTNET_XMIT_TYPE_DMA)
>
>  static enum virtnet_xmit_type virtnet_xmit_ptr_strip(void **ptr)
>  {
> @@ -510,12 +538,180 @@ static void *virtnet_xmit_ptr_mix(void *ptr, enum virtnet_xmit_type type)
>         return (void *)((unsigned long)ptr | type);
>  }
>
> +static void virtnet_sq_unmap(struct send_queue *sq, void **data)
> +{
> +       struct virtnet_sq_dma *head, *tail, *p;
> +       int i;
> +
> +       head = *data;
> +
> +       p = head;
> +
> +       for (i = 0; i < head->num; ++i) {
> +               virtqueue_dma_unmap_page_attrs(sq->vq, p->addr, p->len,
> +                                              DMA_TO_DEVICE, 0);
> +               tail = p;
> +               p = llist_entry(llist_next(&p->node), struct virtnet_sq_dma, node);
> +       }
> +
> +       *data = tail->data;
> +
> +       __llist_add_batch(&head->node, &tail->node,  &sq->dmainfo.free);
> +
> +       sq->dmainfo.free_num += head->num;
> +}
> +
> +static void *virtnet_dma_chain_update(struct send_queue *sq,
> +                                     struct virtnet_sq_dma *head,
> +                                     struct virtnet_sq_dma *tail,
> +                                     u8 num, void *data)
> +{
> +       sq->dmainfo.free_num -= num;
> +       head->num = num;
> +
> +       tail->data = data;
> +
> +       return virtnet_xmit_ptr_mix(head, VIRTNET_XMIT_TYPE_DMA);
> +}
> +
> +static struct virtnet_sq_dma *virtnet_sq_map_sg(struct send_queue *sq, int num, void *data)
> +{
> +       struct virtnet_sq_dma *head = NULL, *p = NULL;
> +       struct scatterlist *sg;
> +       dma_addr_t addr;
> +       int i, err;
> +
> +       if (num > sq->dmainfo.free_num)
> +               return NULL;
> +
> +       for (i = 0; i < num; ++i) {
> +               sg = &sq->sg[i];
> +
> +               addr = virtqueue_dma_map_page_attrs(sq->vq, sg_page(sg),
> +                                                   sg->offset,
> +                                                   sg->length, DMA_TO_DEVICE,
> +                                                   0);
> +               err = virtqueue_dma_mapping_error(sq->vq, addr);
> +               if (err)
> +                       goto err;
> +
> +               sg->dma_address = addr;
> +
> +               p = llist_entry(llist_del_first(&sq->dmainfo.free),
> +                               struct virtnet_sq_dma, node);
> +
> +               p->addr = sg->dma_address;
> +               p->len = sg->length;

I may be missing something, but I don't see how we cap the total number of dmainfos.

Thanks
Jason Wang June 17, 2024, 6:28 a.m. UTC | #2
On Mon, Jun 17, 2024 at 1:00 PM Jason Wang <jasowang@redhat.com> wrote:
>
> On Fri, Jun 14, 2024 at 2:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > If the xsk is enabling, the xsk tx will share the send queue.
> > But the xsk requires that the send queue use the premapped mode.
> > So the send queue must support premapped mode when it is bound to
> > af-xdp.
> >
> > * virtnet_sq_set_premapped(sq, true) is used to enable premapped mode.
> >
> >     In this mode, the driver will record the dma info when skb or xdp
> >     frame is sent.
> >
> >     Currently, the SQ premapped mode is operational only with af-xdp. In
> >     this mode, af-xdp, the kernel stack, and xdp tx/redirect will share
> >     the same SQ. Af-xdp independently manages its DMA. The kernel stack
> >     and xdp tx/redirect utilize this DMA metadata to manage the DMA
> >     info.
> >

Note that there's indeed a mode where we have an exclusive XDP TX queue:

        /* XDP requires extra queues for XDP_TX */
        if (curr_qp + xdp_qp > vi->max_queue_pairs) {
                netdev_warn_once(dev, "XDP request %i queues but max
is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx
mode.\n",
                                 curr_qp + xdp_qp, vi->max_queue_pairs);
                xdp_qp = 0;
        }

So we need to mention how the code works in this patch.

Thanks
Xuan Zhuo June 17, 2024, 7:23 a.m. UTC | #3
On Mon, 17 Jun 2024 13:00:13 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Fri, Jun 14, 2024 at 2:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > If the xsk is enabling, the xsk tx will share the send queue.
> > But the xsk requires that the send queue use the premapped mode.
> > So the send queue must support premapped mode when it is bound to
> > af-xdp.
> >
> > * virtnet_sq_set_premapped(sq, true) is used to enable premapped mode.
> >
> >     In this mode, the driver will record the dma info when skb or xdp
> >     frame is sent.
> >
> >     Currently, the SQ premapped mode is operational only with af-xdp. In
> >     this mode, af-xdp, the kernel stack, and xdp tx/redirect will share
> >     the same SQ. Af-xdp independently manages its DMA. The kernel stack
> >     and xdp tx/redirect utilize this DMA metadata to manage the DMA
> >     info.
> >
> >     If the indirect descriptor feature be supported, the volume of DMA
> >     details we need to maintain becomes quite substantial. Here, we have
> >     a cap on the amount of DMA info we manage.
> >
> >     If the kernel stack and xdp tx/redirect attempt to use more
> >     descriptors, virtnet_add_outbuf() will return an -ENOMEM error. But
> >     the af-xdp can work continually.
>
> Rethink of this whole logic, it looks like all the complication came
> as we decided to go with a pre queue pre mapping flag. I wonder if
> things could be simplified if we do that per buffer?

Yes. That would be simpler.

Then this patch would not be needed. The virtio core would have to record
the premapped info in the virtio ring state or extra.

	 http://lore.kernel.org/all/20230517022249.20790-6-xuanzhuo@linux.alibaba.com

>
> Then we don't need complex logic like dmainfo and cap.

So the premapped mode and the internal dma mode could coexist, and we
would not need to make the sq support premapped mode.


>
> >
> > * virtnet_sq_set_premapped(sq, false) is used to disable premapped mode.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >  drivers/net/virtio_net.c | 228 ++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 224 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index e84a4624549b..88ab9ea1646f 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -25,6 +25,7 @@
> >  #include <net/net_failover.h>
> >  #include <net/netdev_rx_queue.h>
> >  #include <net/netdev_queues.h>
> > +#include <uapi/linux/virtio_ring.h>
>
> Why do we need this?

for using VIRTIO_RING_F_INDIRECT_DESC
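
(The use in question, from virtnet_sq_alloc_dma_meta() later in this
patch:)

	if (virtio_has_feature(sq->vq->vdev, VIRTIO_RING_F_INDIRECT_DESC))
		num = num * 2;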


>
> >
> >  static int napi_weight = NAPI_POLL_WEIGHT;
> >  module_param(napi_weight, int, 0444);
> > @@ -276,6 +277,26 @@ struct virtnet_rq_dma {
> >         u16 need_sync;
> >  };
> >
> > +struct virtnet_sq_dma {
> > +       union {
> > +               struct llist_node node;
> > +               struct llist_head head;
>
> If we want to cap the #dmas, could we simply use an array instead of
> the list here?
>
> > +               void *data;
> > +       };
> > +       dma_addr_t addr;
> > +       u32 len;
> > +       u8 num;
> > +};
> > +
> > +struct virtnet_sq_dma_info {
> > +       /* record for kfree */
> > +       void *p;
> > +
> > +       u32 free_num;
> > +
> > +       struct llist_head free;
> > +};
> > +
> >  /* Internal representation of a send virtqueue */
> >  struct send_queue {
> >         /* Virtqueue associated with this send _queue */
> > @@ -295,6 +316,11 @@ struct send_queue {
> >
> >         /* Record whether sq is in reset state. */
> >         bool reset;
> > +
> > +       /* SQ is premapped mode or not. */
> > +       bool premapped;
> > +
> > +       struct virtnet_sq_dma_info dmainfo;
> >  };
> >
> >  /* Internal representation of a receive virtqueue */
> > @@ -492,9 +518,11 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> >  enum virtnet_xmit_type {
> >         VIRTNET_XMIT_TYPE_SKB,
> >         VIRTNET_XMIT_TYPE_XDP,
> > +       VIRTNET_XMIT_TYPE_DMA,
>
> I think the name is confusing, how about TYPE_PREMAPPED?
>
> >  };
> >
> > -#define VIRTNET_XMIT_TYPE_MASK (VIRTNET_XMIT_TYPE_SKB | VIRTNET_XMIT_TYPE_XDP)
> > +#define VIRTNET_XMIT_TYPE_MASK (VIRTNET_XMIT_TYPE_SKB | VIRTNET_XMIT_TYPE_XDP \
> > +                               | VIRTNET_XMIT_TYPE_DMA)
> >
> >  static enum virtnet_xmit_type virtnet_xmit_ptr_strip(void **ptr)
> >  {
> > @@ -510,12 +538,180 @@ static void *virtnet_xmit_ptr_mix(void *ptr, enum virtnet_xmit_type type)
> >         return (void *)((unsigned long)ptr | type);
> >  }
> >
> > +static void virtnet_sq_unmap(struct send_queue *sq, void **data)
> > +{
> > +       struct virtnet_sq_dma *head, *tail, *p;
> > +       int i;
> > +
> > +       head = *data;
> > +
> > +       p = head;
> > +
> > +       for (i = 0; i < head->num; ++i) {
> > +               virtqueue_dma_unmap_page_attrs(sq->vq, p->addr, p->len,
> > +                                              DMA_TO_DEVICE, 0);
> > +               tail = p;
> > +               p = llist_entry(llist_next(&p->node), struct virtnet_sq_dma, node);
> > +       }
> > +
> > +       *data = tail->data;
> > +
> > +       __llist_add_batch(&head->node, &tail->node,  &sq->dmainfo.free);
> > +
> > +       sq->dmainfo.free_num += head->num;
> > +}
> > +
> > +static void *virtnet_dma_chain_update(struct send_queue *sq,
> > +                                     struct virtnet_sq_dma *head,
> > +                                     struct virtnet_sq_dma *tail,
> > +                                     u8 num, void *data)
> > +{
> > +       sq->dmainfo.free_num -= num;
> > +       head->num = num;
> > +
> > +       tail->data = data;
> > +
> > +       return virtnet_xmit_ptr_mix(head, VIRTNET_XMIT_TYPE_DMA);
> > +}
> > +
> > +static struct virtnet_sq_dma *virtnet_sq_map_sg(struct send_queue *sq, int num, void *data)
> > +{
> > +       struct virtnet_sq_dma *head = NULL, *p = NULL;
> > +       struct scatterlist *sg;
> > +       dma_addr_t addr;
> > +       int i, err;
> > +
> > +       if (num > sq->dmainfo.free_num)
> > +               return NULL;
> > +
> > +       for (i = 0; i < num; ++i) {
> > +               sg = &sq->sg[i];
> > +
> > +               addr = virtqueue_dma_map_page_attrs(sq->vq, sg_page(sg),
> > +                                                   sg->offset,
> > +                                                   sg->length, DMA_TO_DEVICE,
> > +                                                   0);
> > +               err = virtqueue_dma_mapping_error(sq->vq, addr);
> > +               if (err)
> > +                       goto err;
> > +
> > +               sg->dma_address = addr;
> > +
> > +               p = llist_entry(llist_del_first(&sq->dmainfo.free),
> > +                               struct virtnet_sq_dma, node);
> > +
> > +               p->addr = sg->dma_address;
> > +               p->len = sg->length;
>
> I may miss something, but I don't see how we cap the total number of dmainfos.

static void *virtnet_dma_chain_update(struct send_queue *sq,
                                     struct virtnet_sq_dma *head,
                                     struct virtnet_sq_dma *tail,
                                     u8 num, void *data)
{
       sq->dmainfo.free_num -= num;
->       head->num = num;

       tail->data = data;

       return virtnet_xmit_ptr_mix(head, VIRTNET_XMIT_TYPE_DMA);
}



Thanks.

>
> Thanks
>
Xuan Zhuo June 17, 2024, 7:40 a.m. UTC | #4
On Mon, 17 Jun 2024 14:28:05 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Mon, Jun 17, 2024 at 1:00 PM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Fri, Jun 14, 2024 at 2:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > If the xsk is enabling, the xsk tx will share the send queue.
> > > But the xsk requires that the send queue use the premapped mode.
> > > So the send queue must support premapped mode when it is bound to
> > > af-xdp.
> > >
> > > * virtnet_sq_set_premapped(sq, true) is used to enable premapped mode.
> > >
> > >     In this mode, the driver will record the dma info when skb or xdp
> > >     frame is sent.
> > >
> > >     Currently, the SQ premapped mode is operational only with af-xdp. In
> > >     this mode, af-xdp, the kernel stack, and xdp tx/redirect will share
> > >     the same SQ. Af-xdp independently manages its DMA. The kernel stack
> > >     and xdp tx/redirect utilize this DMA metadata to manage the DMA
> > >     info.
> > >
>
> Note that there's indeed a mode when we have exclusive XDP TX queue:
>
>         /* XDP requires extra queues for XDP_TX */
>         if (curr_qp + xdp_qp > vi->max_queue_pairs) {
>                 netdev_warn_once(dev, "XDP request %i queues but max
> is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx
> mode.\n",
>                                  curr_qp + xdp_qp, vi->max_queue_pairs);
>                 xdp_qp = 0;
>         }
>
> So we need to mention how the code works in this patch.

Sorry, I do not get it.

Could you say more?

Thanks.


>
> Thanks
>
Jason Wang June 18, 2024, 12:57 a.m. UTC | #5
On Mon, Jun 17, 2024 at 3:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Mon, 17 Jun 2024 13:00:13 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Fri, Jun 14, 2024 at 2:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > If the xsk is enabling, the xsk tx will share the send queue.
> > > But the xsk requires that the send queue use the premapped mode.
> > > So the send queue must support premapped mode when it is bound to
> > > af-xdp.
> > >
> > > * virtnet_sq_set_premapped(sq, true) is used to enable premapped mode.
> > >
> > >     In this mode, the driver will record the dma info when skb or xdp
> > >     frame is sent.
> > >
> > >     Currently, the SQ premapped mode is operational only with af-xdp. In
> > >     this mode, af-xdp, the kernel stack, and xdp tx/redirect will share
> > >     the same SQ. Af-xdp independently manages its DMA. The kernel stack
> > >     and xdp tx/redirect utilize this DMA metadata to manage the DMA
> > >     info.
> > >
> > >     If the indirect descriptor feature be supported, the volume of DMA
> > >     details we need to maintain becomes quite substantial. Here, we have
> > >     a cap on the amount of DMA info we manage.
> > >
> > >     If the kernel stack and xdp tx/redirect attempt to use more
> > >     descriptors, virtnet_add_outbuf() will return an -ENOMEM error. But
> > >     the af-xdp can work continually.
> >
> > Rethink of this whole logic, it looks like all the complication came
> > as we decided to go with a pre queue pre mapping flag. I wonder if
> > things could be simplified if we do that per buffer?
>
> YES. That will be simply.
>
> Then this patch will be not needed. The virtio core must record the premapped
> imfo to the virtio ring state or extra.
>
>          http://lore.kernel.org/all/20230517022249.20790-6-xuanzhuo@linux.alibaba.com

Yes, something like this. I think it's worthwhile to re-consider that
approach. If my memory is correct, we hadn't spotted complicated issues
there like the ones this patch needs to deal with.

>
> >
> > Then we don't need complex logic like dmainfo and cap.
>
> So the premapped mode and the internal dma mode can coexist.
> Then we do not need to make the sq to support the premapped mode.

Probably.

>
>
> >
> > >
> > > * virtnet_sq_set_premapped(sq, false) is used to disable premapped mode.
> > >
> > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > ---
> > >  drivers/net/virtio_net.c | 228 ++++++++++++++++++++++++++++++++++++++-
> > >  1 file changed, 224 insertions(+), 4 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index e84a4624549b..88ab9ea1646f 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -25,6 +25,7 @@
> > >  #include <net/net_failover.h>
> > >  #include <net/netdev_rx_queue.h>
> > >  #include <net/netdev_queues.h>
> > > +#include <uapi/linux/virtio_ring.h>
> >
> > Why do we need this?
>
> for using VIRTIO_RING_F_INDIRECT_DESC

Ok. It's probably a hint that something like a layer violation is
happening. A specific driver should not know details about the ring layout ...

>
>
> >
> > >
> > >  static int napi_weight = NAPI_POLL_WEIGHT;
> > >  module_param(napi_weight, int, 0444);
> > > @@ -276,6 +277,26 @@ struct virtnet_rq_dma {
> > >         u16 need_sync;
> > >  };
> > >
> > > +struct virtnet_sq_dma {
> > > +       union {
> > > +               struct llist_node node;
> > > +               struct llist_head head;
> >
> > If we want to cap the #dmas, could we simply use an array instead of
> > the list here?
> >
> > > +               void *data;
> > > +       };
> > > +       dma_addr_t addr;
> > > +       u32 len;
> > > +       u8 num;
> > > +};
> > > +
> > > +struct virtnet_sq_dma_info {
> > > +       /* record for kfree */
> > > +       void *p;
> > > +
> > > +       u32 free_num;
> > > +
> > > +       struct llist_head free;
> > > +};
> > > +
> > >  /* Internal representation of a send virtqueue */
> > >  struct send_queue {
> > >         /* Virtqueue associated with this send _queue */
> > > @@ -295,6 +316,11 @@ struct send_queue {
> > >
> > >         /* Record whether sq is in reset state. */
> > >         bool reset;
> > > +
> > > +       /* SQ is premapped mode or not. */
> > > +       bool premapped;
> > > +
> > > +       struct virtnet_sq_dma_info dmainfo;
> > >  };
> > >
> > >  /* Internal representation of a receive virtqueue */
> > > @@ -492,9 +518,11 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > >  enum virtnet_xmit_type {
> > >         VIRTNET_XMIT_TYPE_SKB,
> > >         VIRTNET_XMIT_TYPE_XDP,
> > > +       VIRTNET_XMIT_TYPE_DMA,
> >
> > I think the name is confusing, how about TYPE_PREMAPPED?
> >
> > >  };
> > >
> > > -#define VIRTNET_XMIT_TYPE_MASK (VIRTNET_XMIT_TYPE_SKB | VIRTNET_XMIT_TYPE_XDP)
> > > +#define VIRTNET_XMIT_TYPE_MASK (VIRTNET_XMIT_TYPE_SKB | VIRTNET_XMIT_TYPE_XDP \
> > > +                               | VIRTNET_XMIT_TYPE_DMA)
> > >
> > >  static enum virtnet_xmit_type virtnet_xmit_ptr_strip(void **ptr)
> > >  {
> > > @@ -510,12 +538,180 @@ static void *virtnet_xmit_ptr_mix(void *ptr, enum virtnet_xmit_type type)
> > >         return (void *)((unsigned long)ptr | type);
> > >  }
> > >
> > > +static void virtnet_sq_unmap(struct send_queue *sq, void **data)
> > > +{
> > > +       struct virtnet_sq_dma *head, *tail, *p;
> > > +       int i;
> > > +
> > > +       head = *data;
> > > +
> > > +       p = head;
> > > +
> > > +       for (i = 0; i < head->num; ++i) {
> > > +               virtqueue_dma_unmap_page_attrs(sq->vq, p->addr, p->len,
> > > +                                              DMA_TO_DEVICE, 0);
> > > +               tail = p;
> > > +               p = llist_entry(llist_next(&p->node), struct virtnet_sq_dma, node);
> > > +       }
> > > +
> > > +       *data = tail->data;
> > > +
> > > +       __llist_add_batch(&head->node, &tail->node,  &sq->dmainfo.free);
> > > +
> > > +       sq->dmainfo.free_num += head->num;
> > > +}
> > > +
> > > +static void *virtnet_dma_chain_update(struct send_queue *sq,
> > > +                                     struct virtnet_sq_dma *head,
> > > +                                     struct virtnet_sq_dma *tail,
> > > +                                     u8 num, void *data)
> > > +{
> > > +       sq->dmainfo.free_num -= num;
> > > +       head->num = num;
> > > +
> > > +       tail->data = data;
> > > +
> > > +       return virtnet_xmit_ptr_mix(head, VIRTNET_XMIT_TYPE_DMA);
> > > +}
> > > +
> > > +static struct virtnet_sq_dma *virtnet_sq_map_sg(struct send_queue *sq, int num, void *data)
> > > +{
> > > +       struct virtnet_sq_dma *head = NULL, *p = NULL;
> > > +       struct scatterlist *sg;
> > > +       dma_addr_t addr;
> > > +       int i, err;
> > > +
> > > +       if (num > sq->dmainfo.free_num)
> > > +               return NULL;
> > > +
> > > +       for (i = 0; i < num; ++i) {
> > > +               sg = &sq->sg[i];
> > > +
> > > +               addr = virtqueue_dma_map_page_attrs(sq->vq, sg_page(sg),
> > > +                                                   sg->offset,
> > > +                                                   sg->length, DMA_TO_DEVICE,
> > > +                                                   0);
> > > +               err = virtqueue_dma_mapping_error(sq->vq, addr);
> > > +               if (err)
> > > +                       goto err;
> > > +
> > > +               sg->dma_address = addr;
> > > +
> > > +               p = llist_entry(llist_del_first(&sq->dmainfo.free),
> > > +                               struct virtnet_sq_dma, node);
> > > +
> > > +               p->addr = sg->dma_address;
> > > +               p->len = sg->length;
> >
> > I may miss something, but I don't see how we cap the total number of dmainfos.
>
> static void *virtnet_dma_chain_update(struct send_queue *sq,
>                                      struct virtnet_sq_dma *head,
>                                      struct virtnet_sq_dma *tail,
>                                      u8 num, void *data)
> {
>        sq->dmainfo.free_num -= num;
> ->       head->num = num;
>
>        tail->data = data;
>
>        return virtnet_xmit_ptr_mix(head, VIRTNET_XMIT_TYPE_DMA);
> }

Ok, I spoke too fast. I guess it should be more like:

 if (num > sq->dmainfo.free_num)
               return NULL;

Thanks

>
>
>
> Thanks.
>
> >
> > Thanks
> >
>
Jason Wang June 18, 2024, 12:59 a.m. UTC | #6
On Tue, Jun 18, 2024 at 8:57 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Mon, Jun 17, 2024 at 3:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Mon, 17 Jun 2024 13:00:13 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Fri, Jun 14, 2024 at 2:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > If the xsk is enabling, the xsk tx will share the send queue.
> > > > But the xsk requires that the send queue use the premapped mode.
> > > > So the send queue must support premapped mode when it is bound to
> > > > af-xdp.
> > > >
> > > > * virtnet_sq_set_premapped(sq, true) is used to enable premapped mode.
> > > >
> > > >     In this mode, the driver will record the dma info when skb or xdp
> > > >     frame is sent.
> > > >
> > > >     Currently, the SQ premapped mode is operational only with af-xdp. In
> > > >     this mode, af-xdp, the kernel stack, and xdp tx/redirect will share
> > > >     the same SQ. Af-xdp independently manages its DMA. The kernel stack
> > > >     and xdp tx/redirect utilize this DMA metadata to manage the DMA
> > > >     info.
> > > >
> > > >     If the indirect descriptor feature be supported, the volume of DMA
> > > >     details we need to maintain becomes quite substantial. Here, we have
> > > >     a cap on the amount of DMA info we manage.
> > > >
> > > >     If the kernel stack and xdp tx/redirect attempt to use more
> > > >     descriptors, virtnet_add_outbuf() will return an -ENOMEM error. But
> > > >     the af-xdp can work continually.
> > >
> > > Rethink of this whole logic, it looks like all the complication came
> > > as we decided to go with a pre queue pre mapping flag. I wonder if
> > > things could be simplified if we do that per buffer?
> >
> > YES. That will be simply.
> >
> > Then this patch will be not needed. The virtio core must record the premapped
> > imfo to the virtio ring state or extra.
> >
> >          http://lore.kernel.org/all/20230517022249.20790-6-xuanzhuo@linux.alibaba.com
>
> Yes, something like this. I think it's worthwhile to re-consider that
> approach. If my memory is correct, we haven't spotted the complicated
> issues we need to deal with like this patch.

Btw, it would be even nicer to piggyback on some existing fields.

Thanks

>
> >
> > >
> > > Then we don't need complex logic like dmainfo and cap.
> >
> > So the premapped mode and the internal dma mode can coexist.
> > Then we do not need to make the sq to support the premapped mode.
>
> Probably.
>
> >
> >
> > >
> > > >
> > > > * virtnet_sq_set_premapped(sq, false) is used to disable premapped mode.
> > > >
> > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > ---
> > > >  drivers/net/virtio_net.c | 228 ++++++++++++++++++++++++++++++++++++++-
> > > >  1 file changed, 224 insertions(+), 4 deletions(-)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index e84a4624549b..88ab9ea1646f 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -25,6 +25,7 @@
> > > >  #include <net/net_failover.h>
> > > >  #include <net/netdev_rx_queue.h>
> > > >  #include <net/netdev_queues.h>
> > > > +#include <uapi/linux/virtio_ring.h>
> > >
> > > Why do we need this?
> >
> > for using VIRTIO_RING_F_INDIRECT_DESC
>
> Ok. It's probably a hint that something like layer violation happens.
> A specific driver should not know details about the ring layout ...
>
> >
> >
> > >
> > > >
> > > >  static int napi_weight = NAPI_POLL_WEIGHT;
> > > >  module_param(napi_weight, int, 0444);
> > > > @@ -276,6 +277,26 @@ struct virtnet_rq_dma {
> > > >         u16 need_sync;
> > > >  };
> > > >
> > > > +struct virtnet_sq_dma {
> > > > +       union {
> > > > +               struct llist_node node;
> > > > +               struct llist_head head;
> > >
> > > If we want to cap the #dmas, could we simply use an array instead of
> > > the list here?
> > >
> > > > +               void *data;
> > > > +       };
> > > > +       dma_addr_t addr;
> > > > +       u32 len;
> > > > +       u8 num;
> > > > +};
> > > > +
> > > > +struct virtnet_sq_dma_info {
> > > > +       /* record for kfree */
> > > > +       void *p;
> > > > +
> > > > +       u32 free_num;
> > > > +
> > > > +       struct llist_head free;
> > > > +};
> > > > +
> > > >  /* Internal representation of a send virtqueue */
> > > >  struct send_queue {
> > > >         /* Virtqueue associated with this send _queue */
> > > > @@ -295,6 +316,11 @@ struct send_queue {
> > > >
> > > >         /* Record whether sq is in reset state. */
> > > >         bool reset;
> > > > +
> > > > +       /* SQ is premapped mode or not. */
> > > > +       bool premapped;
> > > > +
> > > > +       struct virtnet_sq_dma_info dmainfo;
> > > >  };
> > > >
> > > >  /* Internal representation of a receive virtqueue */
> > > > @@ -492,9 +518,11 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > >  enum virtnet_xmit_type {
> > > >         VIRTNET_XMIT_TYPE_SKB,
> > > >         VIRTNET_XMIT_TYPE_XDP,
> > > > +       VIRTNET_XMIT_TYPE_DMA,
> > >
> > > I think the name is confusing, how about TYPE_PREMAPPED?
> > >
> > > >  };
> > > >
> > > > -#define VIRTNET_XMIT_TYPE_MASK (VIRTNET_XMIT_TYPE_SKB | VIRTNET_XMIT_TYPE_XDP)
> > > > +#define VIRTNET_XMIT_TYPE_MASK (VIRTNET_XMIT_TYPE_SKB | VIRTNET_XMIT_TYPE_XDP \
> > > > +                               | VIRTNET_XMIT_TYPE_DMA)
> > > >
> > > >  static enum virtnet_xmit_type virtnet_xmit_ptr_strip(void **ptr)
> > > >  {
> > > > @@ -510,12 +538,180 @@ static void *virtnet_xmit_ptr_mix(void *ptr, enum virtnet_xmit_type type)
> > > >         return (void *)((unsigned long)ptr | type);
> > > >  }
> > > >
> > > > +static void virtnet_sq_unmap(struct send_queue *sq, void **data)
> > > > +{
> > > > +       struct virtnet_sq_dma *head, *tail, *p;
> > > > +       int i;
> > > > +
> > > > +       head = *data;
> > > > +
> > > > +       p = head;
> > > > +
> > > > +       for (i = 0; i < head->num; ++i) {
> > > > +               virtqueue_dma_unmap_page_attrs(sq->vq, p->addr, p->len,
> > > > +                                              DMA_TO_DEVICE, 0);
> > > > +               tail = p;
> > > > +               p = llist_entry(llist_next(&p->node), struct virtnet_sq_dma, node);
> > > > +       }
> > > > +
> > > > +       *data = tail->data;
> > > > +
> > > > +       __llist_add_batch(&head->node, &tail->node,  &sq->dmainfo.free);
> > > > +
> > > > +       sq->dmainfo.free_num += head->num;
> > > > +}
> > > > +
> > > > +static void *virtnet_dma_chain_update(struct send_queue *sq,
> > > > +                                     struct virtnet_sq_dma *head,
> > > > +                                     struct virtnet_sq_dma *tail,
> > > > +                                     u8 num, void *data)
> > > > +{
> > > > +       sq->dmainfo.free_num -= num;
> > > > +       head->num = num;
> > > > +
> > > > +       tail->data = data;
> > > > +
> > > > +       return virtnet_xmit_ptr_mix(head, VIRTNET_XMIT_TYPE_DMA);
> > > > +}
> > > > +
> > > > +static struct virtnet_sq_dma *virtnet_sq_map_sg(struct send_queue *sq, int num, void *data)
> > > > +{
> > > > +       struct virtnet_sq_dma *head = NULL, *p = NULL;
> > > > +       struct scatterlist *sg;
> > > > +       dma_addr_t addr;
> > > > +       int i, err;
> > > > +
> > > > +       if (num > sq->dmainfo.free_num)
> > > > +               return NULL;
> > > > +
> > > > +       for (i = 0; i < num; ++i) {
> > > > +               sg = &sq->sg[i];
> > > > +
> > > > +               addr = virtqueue_dma_map_page_attrs(sq->vq, sg_page(sg),
> > > > +                                                   sg->offset,
> > > > +                                                   sg->length, DMA_TO_DEVICE,
> > > > +                                                   0);
> > > > +               err = virtqueue_dma_mapping_error(sq->vq, addr);
> > > > +               if (err)
> > > > +                       goto err;
> > > > +
> > > > +               sg->dma_address = addr;
> > > > +
> > > > +               p = llist_entry(llist_del_first(&sq->dmainfo.free),
> > > > +                               struct virtnet_sq_dma, node);
> > > > +
> > > > +               p->addr = sg->dma_address;
> > > > +               p->len = sg->length;
> > >
> > > I may miss something, but I don't see how we cap the total number of dmainfos.
> >
> > static void *virtnet_dma_chain_update(struct send_queue *sq,
> >                                      struct virtnet_sq_dma *head,
> >                                      struct virtnet_sq_dma *tail,
> >                                      u8 num, void *data)
> > {
> >        sq->dmainfo.free_num -= num;
> > ->       head->num = num;
> >
> >        tail->data = data;
> >
> >        return virtnet_xmit_ptr_mix(head, VIRTNET_XMIT_TYPE_DMA);
> > }
>
> Ok, speak too fast I guess it should be more like:
>
>  if (num > sq->dmainfo.free_num)
>                return NULL;
>
> Thanks
>
> >
> >
> >
> > Thanks.
> >
> > >
> > > Thanks
> > >
> >
Jason Wang June 18, 2024, 1 a.m. UTC | #7
On Mon, Jun 17, 2024 at 3:41 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Mon, 17 Jun 2024 14:28:05 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Mon, Jun 17, 2024 at 1:00 PM Jason Wang <jasowang@redhat.com> wrote:
> > >
> > > On Fri, Jun 14, 2024 at 2:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > If the xsk is enabling, the xsk tx will share the send queue.
> > > > But the xsk requires that the send queue use the premapped mode.
> > > > So the send queue must support premapped mode when it is bound to
> > > > af-xdp.
> > > >
> > > > * virtnet_sq_set_premapped(sq, true) is used to enable premapped mode.
> > > >
> > > >     In this mode, the driver will record the dma info when skb or xdp
> > > >     frame is sent.
> > > >
> > > >     Currently, the SQ premapped mode is operational only with af-xdp. In
> > > >     this mode, af-xdp, the kernel stack, and xdp tx/redirect will share
> > > >     the same SQ. Af-xdp independently manages its DMA. The kernel stack
> > > >     and xdp tx/redirect utilize this DMA metadata to manage the DMA
> > > >     info.
> > > >
> >
> > Note that there's indeed a mode when we have exclusive XDP TX queue:
> >
> >         /* XDP requires extra queues for XDP_TX */
> >         if (curr_qp + xdp_qp > vi->max_queue_pairs) {
> >                 netdev_warn_once(dev, "XDP request %i queues but max
> > is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx
> > mode.\n",
> >                                  curr_qp + xdp_qp, vi->max_queue_pairs);
> >                 xdp_qp = 0;
> >         }
> >
> > So we need to mention how the code works in this patch.
>
> Sorry, I do not get it.
>
> Could you say more?

I meant in the commit log, you said:

"""
In this mode, af-xdp, the kernel stack, and xdp tx/redirect will share
the same SQ.
"""

is not correct if we have sufficient queue pairs.

We need to tweak it and explain whether the code still works when we
have exclusive XDP TX queues.

Thanks

>
> Thanks.
>
>
> >
> > Thanks
> >
>
Xuan Zhuo June 18, 2024, 1:31 a.m. UTC | #8
On Tue, 18 Jun 2024 09:00:56 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Mon, Jun 17, 2024 at 3:41 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Mon, 17 Jun 2024 14:28:05 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Mon, Jun 17, 2024 at 1:00 PM Jason Wang <jasowang@redhat.com> wrote:
> > > >
> > > > On Fri, Jun 14, 2024 at 2:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > >
> > > > > If the xsk is enabling, the xsk tx will share the send queue.
> > > > > But the xsk requires that the send queue use the premapped mode.
> > > > > So the send queue must support premapped mode when it is bound to
> > > > > af-xdp.
> > > > >
> > > > > * virtnet_sq_set_premapped(sq, true) is used to enable premapped mode.
> > > > >
> > > > >     In this mode, the driver will record the dma info when skb or xdp
> > > > >     frame is sent.
> > > > >
> > > > >     Currently, the SQ premapped mode is operational only with af-xdp. In
> > > > >     this mode, af-xdp, the kernel stack, and xdp tx/redirect will share
> > > > >     the same SQ. Af-xdp independently manages its DMA. The kernel stack
> > > > >     and xdp tx/redirect utilize this DMA metadata to manage the DMA
> > > > >     info.
> > > > >
> > >
> > > Note that there's indeed a mode when we have exclusive XDP TX queue:
> > >
> > >         /* XDP requires extra queues for XDP_TX */
> > >         if (curr_qp + xdp_qp > vi->max_queue_pairs) {
> > >                 netdev_warn_once(dev, "XDP request %i queues but max
> > > is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx
> > > mode.\n",
> > >                                  curr_qp + xdp_qp, vi->max_queue_pairs);
> > >                 xdp_qp = 0;
> > >         }
> > >
> > > So we need to mention how the code works in this patch.
> >
> > Sorry, I do not get it.
> >
> > Could you say more?
>
> I meant in the commit log, you said:
>
> """
> In this mode, af-xdp, the kernel stack, and xdp tx/redirect will share
> the same SQ.
> """
>
> is not correct if we have sufficient queue pairs.
>
> We need to tweak it and explain if the code can still work if we have
> exclusive XDP TX queues.


Yes, it can work.

I will explain this in the next version.

Thanks.


>
> Thanks
>
> >
> > Thanks.
> >
> >
> > >
> > > Thanks
> > >
> >
>
Xuan Zhuo June 18, 2024, 1:34 a.m. UTC | #9
On Tue, 18 Jun 2024 08:57:52 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Mon, Jun 17, 2024 at 3:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Mon, 17 Jun 2024 13:00:13 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Fri, Jun 14, 2024 at 2:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > If the xsk is enabling, the xsk tx will share the send queue.
> > > > But the xsk requires that the send queue use the premapped mode.
> > > > So the send queue must support premapped mode when it is bound to
> > > > af-xdp.
> > > >
> > > > * virtnet_sq_set_premapped(sq, true) is used to enable premapped mode.
> > > >
> > > >     In this mode, the driver will record the dma info when skb or xdp
> > > >     frame is sent.
> > > >
> > > >     Currently, the SQ premapped mode is operational only with af-xdp. In
> > > >     this mode, af-xdp, the kernel stack, and xdp tx/redirect will share
> > > >     the same SQ. Af-xdp independently manages its DMA. The kernel stack
> > > >     and xdp tx/redirect utilize this DMA metadata to manage the DMA
> > > >     info.
> > > >
> > > >     If the indirect descriptor feature be supported, the volume of DMA
> > > >     details we need to maintain becomes quite substantial. Here, we have
> > > >     a cap on the amount of DMA info we manage.
> > > >
> > > >     If the kernel stack and xdp tx/redirect attempt to use more
> > > >     descriptors, virtnet_add_outbuf() will return an -ENOMEM error. But
> > > >     the af-xdp can work continually.
> > >
> > > Rethink of this whole logic, it looks like all the complication came
> > > as we decided to go with a pre queue pre mapping flag. I wonder if
> > > things could be simplified if we do that per buffer?
> >
> > YES. That will be simply.
> >
> > Then this patch will be not needed. The virtio core must record the premapped
> > imfo to the virtio ring state or extra.
> >
> >          http://lore.kernel.org/all/20230517022249.20790-6-xuanzhuo@linux.alibaba.com
>
> Yes, something like this. I think it's worthwhile to re-consider that
> approach. If my memory is correct, we haven't spotted the complicated
> issues we need to deal with like this patch.
>
> >
> > >
> > > Then we don't need complex logic like dmainfo and cap.
> >
> > So the premapped mode and the internal dma mode can coexist.
> > Then we do not need to make the sq to support the premapped mode.
>
> Probably.
>
> >
> >
> > >
> > > >
> > > > * virtnet_sq_set_premapped(sq, false) is used to disable premapped mode.
> > > >
> > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > ---
> > > >  drivers/net/virtio_net.c | 228 ++++++++++++++++++++++++++++++++++++++-
> > > >  1 file changed, 224 insertions(+), 4 deletions(-)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index e84a4624549b..88ab9ea1646f 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -25,6 +25,7 @@
> > > >  #include <net/net_failover.h>
> > > >  #include <net/netdev_rx_queue.h>
> > > >  #include <net/netdev_queues.h>
> > > > +#include <uapi/linux/virtio_ring.h>
> > >
> > > Why do we need this?
> >
> > for using VIRTIO_RING_F_INDIRECT_DESC
>
> Ok. It's probably a hint that something like layer violation happens.
> A specific driver should not know details about the ring layout ...


But the blk device does the same thing.


>
> >
> >
> > >
> > > >
> > > >  static int napi_weight = NAPI_POLL_WEIGHT;
> > > >  module_param(napi_weight, int, 0444);
> > > > @@ -276,6 +277,26 @@ struct virtnet_rq_dma {
> > > >         u16 need_sync;
> > > >  };
> > > >
> > > > +struct virtnet_sq_dma {
> > > > +       union {
> > > > +               struct llist_node node;
> > > > +               struct llist_head head;
> > >
> > > If we want to cap the #dmas, could we simply use an array instead of
> > > the list here?
> > >
> > > > +               void *data;
> > > > +       };
> > > > +       dma_addr_t addr;
> > > > +       u32 len;
> > > > +       u8 num;
> > > > +};
> > > > +
> > > > +struct virtnet_sq_dma_info {
> > > > +       /* record for kfree */
> > > > +       void *p;
> > > > +
> > > > +       u32 free_num;
> > > > +
> > > > +       struct llist_head free;
> > > > +};
> > > > +
> > > >  /* Internal representation of a send virtqueue */
> > > >  struct send_queue {
> > > >         /* Virtqueue associated with this send _queue */
> > > > @@ -295,6 +316,11 @@ struct send_queue {
> > > >
> > > >         /* Record whether sq is in reset state. */
> > > >         bool reset;
> > > > +
> > > > +       /* SQ is premapped mode or not. */
> > > > +       bool premapped;
> > > > +
> > > > +       struct virtnet_sq_dma_info dmainfo;
> > > >  };
> > > >
> > > >  /* Internal representation of a receive virtqueue */
> > > > @@ -492,9 +518,11 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > >  enum virtnet_xmit_type {
> > > >         VIRTNET_XMIT_TYPE_SKB,
> > > >         VIRTNET_XMIT_TYPE_XDP,
> > > > +       VIRTNET_XMIT_TYPE_DMA,
> > >
> > > I think the name is confusing, how about TYPE_PREMAPPED?
> > >
> > > >  };
> > > >
> > > > -#define VIRTNET_XMIT_TYPE_MASK (VIRTNET_XMIT_TYPE_SKB | VIRTNET_XMIT_TYPE_XDP)
> > > > +#define VIRTNET_XMIT_TYPE_MASK (VIRTNET_XMIT_TYPE_SKB | VIRTNET_XMIT_TYPE_XDP \
> > > > +                               | VIRTNET_XMIT_TYPE_DMA)
> > > >
> > > >  static enum virtnet_xmit_type virtnet_xmit_ptr_strip(void **ptr)
> > > >  {
> > > > @@ -510,12 +538,180 @@ static void *virtnet_xmit_ptr_mix(void *ptr, enum virtnet_xmit_type type)
> > > >         return (void *)((unsigned long)ptr | type);
> > > >  }
> > > >
> > > > +static void virtnet_sq_unmap(struct send_queue *sq, void **data)
> > > > +{
> > > > +       struct virtnet_sq_dma *head, *tail, *p;
> > > > +       int i;
> > > > +
> > > > +       head = *data;
> > > > +
> > > > +       p = head;
> > > > +
> > > > +       for (i = 0; i < head->num; ++i) {
> > > > +               virtqueue_dma_unmap_page_attrs(sq->vq, p->addr, p->len,
> > > > +                                              DMA_TO_DEVICE, 0);
> > > > +               tail = p;
> > > > +               p = llist_entry(llist_next(&p->node), struct virtnet_sq_dma, node);
> > > > +       }
> > > > +
> > > > +       *data = tail->data;
> > > > +
> > > > +       __llist_add_batch(&head->node, &tail->node,  &sq->dmainfo.free);
> > > > +
> > > > +       sq->dmainfo.free_num += head->num;
> > > > +}
> > > > +
> > > > +static void *virtnet_dma_chain_update(struct send_queue *sq,
> > > > +                                     struct virtnet_sq_dma *head,
> > > > +                                     struct virtnet_sq_dma *tail,
> > > > +                                     u8 num, void *data)
> > > > +{
> > > > +       sq->dmainfo.free_num -= num;
> > > > +       head->num = num;
> > > > +
> > > > +       tail->data = data;
> > > > +
> > > > +       return virtnet_xmit_ptr_mix(head, VIRTNET_XMIT_TYPE_DMA);
> > > > +}
> > > > +
> > > > +static struct virtnet_sq_dma *virtnet_sq_map_sg(struct send_queue *sq, int num, void *data)
> > > > +{
> > > > +       struct virtnet_sq_dma *head = NULL, *p = NULL;
> > > > +       struct scatterlist *sg;
> > > > +       dma_addr_t addr;
> > > > +       int i, err;
> > > > +
> > > > +       if (num > sq->dmainfo.free_num)
> > > > +               return NULL;
> > > > +
> > > > +       for (i = 0; i < num; ++i) {
> > > > +               sg = &sq->sg[i];
> > > > +
> > > > +               addr = virtqueue_dma_map_page_attrs(sq->vq, sg_page(sg),
> > > > +                                                   sg->offset,
> > > > +                                                   sg->length, DMA_TO_DEVICE,
> > > > +                                                   0);
> > > > +               err = virtqueue_dma_mapping_error(sq->vq, addr);
> > > > +               if (err)
> > > > +                       goto err;
> > > > +
> > > > +               sg->dma_address = addr;
> > > > +
> > > > +               p = llist_entry(llist_del_first(&sq->dmainfo.free),
> > > > +                               struct virtnet_sq_dma, node);
> > > > +
> > > > +               p->addr = sg->dma_address;
> > > > +               p->len = sg->length;
> > >
> > > I may miss something, but I don't see how we cap the total number of dmainfos.
> >
> > static void *virtnet_dma_chain_update(struct send_queue *sq,
> >                                      struct virtnet_sq_dma *head,
> >                                      struct virtnet_sq_dma *tail,
> >                                      u8 num, void *data)
> > {
> >        sq->dmainfo.free_num -= num;
> > ->       head->num = num;
> >
> >        tail->data = data;
> >
> >        return virtnet_xmit_ptr_mix(head, VIRTNET_XMIT_TYPE_DMA);
> > }
>
> Ok, speak too fast I guess it should be more like:
>
>  if (num > sq->dmainfo.free_num)
>                return NULL;


static struct virtnet_sq_dma *virtnet_sq_map_sg(struct send_queue *sq, int num, void *data)
{
       struct virtnet_sq_dma *head = NULL, *p = NULL;
       struct scatterlist *sg;
       dma_addr_t addr;
       int i, err;

       if (num > sq->dmainfo.free_num)
               return NULL;

Do you mean this?

Thanks.

>
> Thanks
>
> >
> >
> >
> > Thanks.
> >
> > >
> > > Thanks
> > >
> >
>

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e84a4624549b..88ab9ea1646f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -25,6 +25,7 @@ 
 #include <net/net_failover.h>
 #include <net/netdev_rx_queue.h>
 #include <net/netdev_queues.h>
+#include <uapi/linux/virtio_ring.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -276,6 +277,26 @@  struct virtnet_rq_dma {
 	u16 need_sync;
 };
 
+struct virtnet_sq_dma {
+	union {
+		struct llist_node node;
+		struct llist_head head;
+		void *data;
+	};
+	dma_addr_t addr;
+	u32 len;
+	u8 num;
+};
+
+struct virtnet_sq_dma_info {
+	/* record for kfree */
+	void *p;
+
+	u32 free_num;
+
+	struct llist_head free;
+};
+
 /* Internal representation of a send virtqueue */
 struct send_queue {
 	/* Virtqueue associated with this send _queue */
@@ -295,6 +316,11 @@  struct send_queue {
 
 	/* Record whether sq is in reset state. */
 	bool reset;
+
+	/* SQ is premapped mode or not. */
+	bool premapped;
+
+	struct virtnet_sq_dma_info dmainfo;
 };
 
 /* Internal representation of a receive virtqueue */
@@ -492,9 +518,11 @@  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 enum virtnet_xmit_type {
 	VIRTNET_XMIT_TYPE_SKB,
 	VIRTNET_XMIT_TYPE_XDP,
+	VIRTNET_XMIT_TYPE_DMA,
 };
 
-#define VIRTNET_XMIT_TYPE_MASK (VIRTNET_XMIT_TYPE_SKB | VIRTNET_XMIT_TYPE_XDP)
+#define VIRTNET_XMIT_TYPE_MASK (VIRTNET_XMIT_TYPE_SKB | VIRTNET_XMIT_TYPE_XDP \
+				| VIRTNET_XMIT_TYPE_DMA)
 
 static enum virtnet_xmit_type virtnet_xmit_ptr_strip(void **ptr)
 {
@@ -510,12 +538,180 @@  static void *virtnet_xmit_ptr_mix(void *ptr, enum virtnet_xmit_type type)
 	return (void *)((unsigned long)ptr | type);
 }
 
+static void virtnet_sq_unmap(struct send_queue *sq, void **data)
+{
+	struct virtnet_sq_dma *head, *tail, *p;
+	int i;
+
+	head = *data;
+
+	p = head;
+
+	for (i = 0; i < head->num; ++i) {
+		virtqueue_dma_unmap_page_attrs(sq->vq, p->addr, p->len,
+					       DMA_TO_DEVICE, 0);
+		tail = p;
+		p = llist_entry(llist_next(&p->node), struct virtnet_sq_dma, node);
+	}
+
+	*data = tail->data;
+
+	__llist_add_batch(&head->node, &tail->node,  &sq->dmainfo.free);
+
+	sq->dmainfo.free_num += head->num;
+}
+
+static void *virtnet_dma_chain_update(struct send_queue *sq,
+				      struct virtnet_sq_dma *head,
+				      struct virtnet_sq_dma *tail,
+				      u8 num, void *data)
+{
+	sq->dmainfo.free_num -= num;
+	head->num = num;
+
+	tail->data = data;
+
+	return virtnet_xmit_ptr_mix(head, VIRTNET_XMIT_TYPE_DMA);
+}
+
+static struct virtnet_sq_dma *virtnet_sq_map_sg(struct send_queue *sq, int num, void *data)
+{
+	struct virtnet_sq_dma *head = NULL, *p = NULL;
+	struct scatterlist *sg;
+	dma_addr_t addr;
+	int i, err;
+
+	if (num > sq->dmainfo.free_num)
+		return NULL;
+
+	for (i = 0; i < num; ++i) {
+		sg = &sq->sg[i];
+
+		addr = virtqueue_dma_map_page_attrs(sq->vq, sg_page(sg),
+						    sg->offset,
+						    sg->length, DMA_TO_DEVICE,
+						    0);
+		err = virtqueue_dma_mapping_error(sq->vq, addr);
+		if (err)
+			goto err;
+
+		sg->dma_address = addr;
+
+		p = llist_entry(llist_del_first(&sq->dmainfo.free),
+				struct virtnet_sq_dma, node);
+
+		p->addr = sg->dma_address;
+		p->len = sg->length;
+
+		if (head)
+			__llist_add(&p->node, &head->head);
+		else
+			head = p;
+	}
+
+	return virtnet_dma_chain_update(sq, head, p, num, data);
+
+err:
+	if (i) {
+		virtnet_dma_chain_update(sq, head, p, i, data);
+		virtnet_sq_unmap(sq, (void **)&head);
+	}
+
+	return NULL;
+}
+
 static int virtnet_add_outbuf(struct send_queue *sq, int num, void *data,
 			      enum virtnet_xmit_type type)
 {
-	return virtqueue_add_outbuf(sq->vq, sq->sg, num,
-				    virtnet_xmit_ptr_mix(data, type),
-				    GFP_ATOMIC);
+	int ret;
+
+	data = virtnet_xmit_ptr_mix(data, type);
+
+	if (sq->premapped) {
+		data = virtnet_sq_map_sg(sq, num, data);
+		if (!data)
+			return -ENOMEM;
+	}
+
+	ret = virtqueue_add_outbuf(sq->vq, sq->sg, num, data, GFP_ATOMIC);
+	if (ret && sq->premapped) {
+		virtnet_xmit_ptr_strip(&data);
+		virtnet_sq_unmap(sq, &data);
+	}
+
+	return ret;
+}
+
+static int virtnet_sq_alloc_dma_meta(struct send_queue *sq)
+{
+	struct virtnet_sq_dma *d;
+	int num, i;
+
+	num = virtqueue_get_vring_size(sq->vq);
+
+	/* Currently, the SQ premapped mode is operational only with af-xdp. In
+	 * this mode, af-xdp, the kernel stack, and xdp tx/redirect will share
+	 * the same SQ. Af-xdp independently manages its DMA. The kernel stack
+	 * and xdp tx/redirect utilize this DMA metadata to manage the DMA info.
+	 *
+	 * If the indirect descriptor feature is supported, the volume of DMA
+	 * details we need to maintain becomes quite substantial. Here, we have
+	 * a cap on the amount of DMA info we manage, effectively limiting it to
+	 * twice the size of the ring buffer.
+	 *
+	 * If the kernel stack and xdp tx/redirect attempt to use more
+	 * descriptors than allowed by this double ring buffer size,
+	 * virtnet_add_outbuf() will return an -ENOMEM error. But af-xdp can
+	 * continue to work.
+	 */
+	if (virtio_has_feature(sq->vq->vdev, VIRTIO_RING_F_INDIRECT_DESC))
+		num = num * 2;
+
+	sq->dmainfo.p = kvcalloc(num, sizeof(struct virtnet_sq_dma), GFP_KERNEL);
+	if (!sq->dmainfo.p)
+		return -ENOMEM;
+
+	init_llist_head(&sq->dmainfo.free);
+
+	sq->dmainfo.free_num = num;
+
+	for (i = 0; i < num; ++i) {
+		d = sq->dmainfo.p + sizeof(struct virtnet_sq_dma) * i;
+
+		__llist_add(&d->node, &sq->dmainfo.free);
+	}
+
+	return 0;
+}
+
+static void virtnet_sq_free_dma_meta(struct send_queue *sq)
+{
+	kvfree(sq->dmainfo.p);
+
+	sq->dmainfo.p = NULL;
+	sq->dmainfo.free_num = 0;
+}
+
+/* This function must be called immediately after creating the vq, or after vq
+ * reset, and before adding any buffers to it.
+ */
+static __maybe_unused int virtnet_sq_set_premapped(struct send_queue *sq, bool premapped)
+{
+	if (premapped) {
+		int r;
+
+		r = virtnet_sq_alloc_dma_meta(sq);
+
+		if (r)
+			return r;
+	} else {
+		virtnet_sq_free_dma_meta(sq);
+	}
+
+	BUG_ON(virtqueue_set_dma_premapped(sq->vq, premapped));
+
+	sq->premapped = premapped;
+	return 0;
 }
 
 static void __free_old_xmit(struct send_queue *sq, bool in_napi,
@@ -529,6 +725,7 @@  static void __free_old_xmit(struct send_queue *sq, bool in_napi,
 	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
 		++stats->packets;
 
+retry:
 		switch (virtnet_xmit_ptr_strip(&ptr)) {
 		case VIRTNET_XMIT_TYPE_SKB:
 			skb = ptr;
@@ -545,6 +742,16 @@  static void __free_old_xmit(struct send_queue *sq, bool in_napi,
 			stats->bytes += xdp_get_frame_len(frame);
 			xdp_return_frame(frame);
 			break;
+
+		case VIRTNET_XMIT_TYPE_DMA:
+			virtnet_sq_unmap(sq, &ptr);
+
+			/* For TYPE_DMA, the ptr pointed to the virtnet_sq_dma
+			 * struct. After the virtnet_sq_unmap, the ptr points to
+			 * the skb or xdp pointer | TYPE. So we call the strip
+			 * func again.
+			 */
+			goto retry;
 		}
 	}
 }
@@ -5232,6 +5439,8 @@  static void virtnet_free_queues(struct virtnet_info *vi)
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		__netif_napi_del(&vi->rq[i].napi);
 		__netif_napi_del(&vi->sq[i].napi);
+
+		virtnet_sq_free_dma_meta(&vi->sq[i]);
 	}
 
 	/* We called __netif_napi_del(),
@@ -5280,6 +5489,13 @@  static void free_receive_page_frags(struct virtnet_info *vi)
 
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
 {
+	struct virtnet_info *vi = vq->vdev->priv;
+	struct send_queue *sq;
+	int i = vq2txq(vq);
+
+	sq = &vi->sq[i];
+
+retry:
 	switch (virtnet_xmit_ptr_strip(&buf)) {
 	case VIRTNET_XMIT_TYPE_SKB:
 		dev_kfree_skb(buf);
@@ -5288,6 +5504,10 @@  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
 	case VIRTNET_XMIT_TYPE_XDP:
 		xdp_return_frame(buf);
 		break;
+
+	case VIRTNET_XMIT_TYPE_DMA:
+		virtnet_sq_unmap(sq, &buf);
+		goto retry;
 	}
 }