diff mbox series

[net-next,v2,06/13] virtio-net: rq submits premapped per-buffer

Message ID 20241030082453.97310-7-xuanzhuo@linux.alibaba.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Headers show
Series virtio-net: support AF_XDP zero copy (tx) | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 5 this patch: 5
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 11 of 11 maintainers
netdev/build_clang success Errors and warnings before: 3 this patch: 3
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 4 this patch: 4
netdev/checkpatch warning WARNING: line length of 84 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Xuan Zhuo Oct. 30, 2024, 8:24 a.m. UTC
virtio-net rq submits premapped per-buffer by setting sg page to NULL.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

Comments

Jason Wang Nov. 5, 2024, 3:23 a.m. UTC | #1
On Wed, Oct 30, 2024 at 4:25 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> virtio-net rq submits premapped per-buffer by setting sg page to NULL;
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
>  drivers/net/virtio_net.c | 24 +++++++++++++-----------
>  1 file changed, 13 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 792e9eadbfc3..09757fa408bd 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -542,6 +542,12 @@ static struct sk_buff *ptr_to_skb(void *ptr)
>         return (struct sk_buff *)((unsigned long)ptr & ~VIRTIO_ORPHAN_FLAG);
>  }
>
> +static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> +{
> +       sg->dma_address = addr;
> +       sg->length = len;

This may work but I think it's better to reuse existing dma sg helpers like:

sg_dma_address(sg) = addr;
sg_dma_length(sg) = len;

And we probably need to fix the virtio core which only uses
sg_dma_address() but not sg_dma_length().

This helps us to avoid future issues when CONFIG_NEED_SG_DMA_LENGTH is set.

Others look good.

Thanks

> +}
> +
>  static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
>                             bool in_napi, struct virtnet_sq_free_stats *stats)
>  {
> @@ -915,8 +921,7 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
>         addr = dma->addr - sizeof(*dma) + offset;
>
>         sg_init_table(rq->sg, 1);
> -       rq->sg[0].dma_address = addr;
> -       rq->sg[0].length = len;
> +       sg_fill_dma(rq->sg, addr, len);
>  }
>
>  static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
> @@ -1068,12 +1073,6 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
>         }
>  }
>
> -static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> -{
> -       sg->dma_address = addr;
> -       sg->length = len;
> -}
> -
>  static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
>                                    struct receive_queue *rq, void *buf, u32 len)
>  {
> @@ -1354,7 +1353,8 @@ static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue
>                 sg_init_table(rq->sg, 1);
>                 sg_fill_dma(rq->sg, addr, len);
>
> -               err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp);
> +               err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, xsk_buffs[i],
> +                                                   NULL, true, gfp);
>                 if (err)
>                         goto err;
>         }
> @@ -2431,7 +2431,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
>
>         virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
>
> -       err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
> +       err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx,
> +                                           rq->do_dma, gfp);
>         if (err < 0) {
>                 if (rq->do_dma)
>                         virtnet_rq_unmap(rq, buf, 0);
> @@ -2546,7 +2547,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
>         virtnet_rq_init_one_sg(rq, buf, len);
>
>         ctx = mergeable_len_to_ctx(len + room, headroom);
> -       err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
> +       err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx,
> +                                           rq->do_dma, gfp);
>         if (err < 0) {
>                 if (rq->do_dma)
>                         virtnet_rq_unmap(rq, buf, 0);
> --
> 2.32.0.3.g01195cf9f
>
Xuan Zhuo Nov. 5, 2024, 7:09 a.m. UTC | #2
On Tue, 5 Nov 2024 11:23:50 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Wed, Oct 30, 2024 at 4:25 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > virtio-net rq submits premapped per-buffer by setting sg page to NULL;
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >  drivers/net/virtio_net.c | 24 +++++++++++++-----------
> >  1 file changed, 13 insertions(+), 11 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 792e9eadbfc3..09757fa408bd 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -542,6 +542,12 @@ static struct sk_buff *ptr_to_skb(void *ptr)
> >         return (struct sk_buff *)((unsigned long)ptr & ~VIRTIO_ORPHAN_FLAG);
> >  }
> >
> > +static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > +{
> > +       sg->dma_address = addr;
> > +       sg->length = len;
>
> This may work but I think it's better to reuse existing dma sg helpers like:
>
> sg_dma_address(sg) = addr;
> sg_dma_length(sg) = len;
>
> And we probably need to fix the virtio core which only uses
> sg_dma_address() but not sg_dma_length().
>
> This helps us to avoid future issues when CONFIG_NEED_SG_DMA_LENGTH is set.


I don't think so.

For no-premapped mode, we pass the sg as no-dma sg to virtio core,
so the virtio core uses the sg->length directly.
If virtio core do dma map for sg, we do not use the dma_map_sg_attrs(),
so we must use sg->length directly.

In this case, for the driver, we can not use sg_dma_length(),
if CONFIG_NEED_SG_DMA_LENGTH is set, sg_dma_length() will set sg->dma_length,
but virtio core use sg->length.

For sg->dma_address, it is ok for me to use sg_dma_address or not.
But for consistency to sg->length, I use the sg->dma_address directly.

I noticed this is special, so I put them into an independent function.

Thanks.

>
> Others look good.
>
> Thanks
>
> > +}
> > +
> >  static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
> >                             bool in_napi, struct virtnet_sq_free_stats *stats)
> >  {
> > @@ -915,8 +921,7 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
> >         addr = dma->addr - sizeof(*dma) + offset;
> >
> >         sg_init_table(rq->sg, 1);
> > -       rq->sg[0].dma_address = addr;
> > -       rq->sg[0].length = len;
> > +       sg_fill_dma(rq->sg, addr, len);
> >  }
> >
> >  static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
> > @@ -1068,12 +1073,6 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
> >         }
> >  }
> >
> > -static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > -{
> > -       sg->dma_address = addr;
> > -       sg->length = len;
> > -}
> > -
> >  static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
> >                                    struct receive_queue *rq, void *buf, u32 len)
> >  {
> > @@ -1354,7 +1353,8 @@ static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue
> >                 sg_init_table(rq->sg, 1);
> >                 sg_fill_dma(rq->sg, addr, len);
> >
> > -               err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp);
> > +               err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, xsk_buffs[i],
> > +                                                   NULL, true, gfp);
> >                 if (err)
> >                         goto err;
> >         }
> > @@ -2431,7 +2431,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
> >
> >         virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
> >
> > -       err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
> > +       err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx,
> > +                                           rq->do_dma, gfp);
> >         if (err < 0) {
> >                 if (rq->do_dma)
> >                         virtnet_rq_unmap(rq, buf, 0);
> > @@ -2546,7 +2547,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
> >         virtnet_rq_init_one_sg(rq, buf, len);
> >
> >         ctx = mergeable_len_to_ctx(len + room, headroom);
> > -       err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
> > +       err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx,
> > +                                           rq->do_dma, gfp);
> >         if (err < 0) {
> >                 if (rq->do_dma)
> >                         virtnet_rq_unmap(rq, buf, 0);
> > --
> > 2.32.0.3.g01195cf9f
> >
>
Jason Wang Nov. 6, 2024, 1:56 a.m. UTC | #3
On Tue, Nov 5, 2024 at 3:23 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Tue, 5 Nov 2024 11:23:50 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Wed, Oct 30, 2024 at 4:25 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > virtio-net rq submits premapped per-buffer by setting sg page to NULL;
> > >
> > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > ---
> > >  drivers/net/virtio_net.c | 24 +++++++++++++-----------
> > >  1 file changed, 13 insertions(+), 11 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index 792e9eadbfc3..09757fa408bd 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -542,6 +542,12 @@ static struct sk_buff *ptr_to_skb(void *ptr)
> > >         return (struct sk_buff *)((unsigned long)ptr & ~VIRTIO_ORPHAN_FLAG);
> > >  }
> > >
> > > +static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > > +{
> > > +       sg->dma_address = addr;
> > > +       sg->length = len;
> >
> > This may work but I think it's better to reuse existing dma sg helpers like:
> >
> > sg_dma_address(sg) = addr;
> > sg_dma_length(sg) = len;
> >
> > And we probably need to fix the virtio core which only uses
> > sg_dma_address() but not sg_dma_length().
> >
> > This helps us to avoid future issues when CONFIG_NEED_SG_DMA_LENGTH is set.
>
>
> I don't think so.
>
> For no-premapped mode, we pass the sg as no-dma sg to virtio core,
> so the virtio core uses the sg->length directly.

This is fine.

> If virtio core do dma map for sg, we do not use the dma_map_sg_attrs(),
> so we must use sg->length directly.

I meant it's a hack. It may work now but will be a bug in the future.

For example, I'm playing with a prototype to do pre mapping for virtio-blk,
the idea is to move the expensive DMA mappings in the case of swiotlb
etc to be done outside the pre virtqueue lock. In that case, the
driver may want to use dma_map_sg() instead of dma_map_page().

I'd suppose we will finally go with the way where DMA mappings need
to be handled by the driver, and dma_map_sg() is faster than per sg
dma_map_page() anyhow.

>
> In this case, for the driver, we can not use sg_dma_length(),
> if CONFIG_NEED_SG_DMA_LENGTH is set, sg_dma_length() will set sg->dma_length,
> but virtio core use sg->length.

Well, we just need a minor tweak to get the length from
vring_map_one_sg(), then everything should be fine?

if (sg_is_premapped) {
      *addr = sg_dma_address(sg);
      *len = sg_dma_len(sg);
}

>
> For sg->dma_address, it is ok for me to use sg_dma_address or not.
> But for consistency to sg->length, I use the sg->dma_address directly.
>
> I noticed this is special, so I put them into an independent function.
>
> Thanks.

Actually, the code like sg_fill_dma() calls for a virtqueue dma
mapping helper, I think we've agreed that core needs to hide DMA
details from the driver.  That is something like
virtqueue_dma_map_sg() etc.

Thanks

>
> >
> > Others look good.
> >
> > Thanks
> >
> > > +}
> > > +
> > >  static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
> > >                             bool in_napi, struct virtnet_sq_free_stats *stats)
> > >  {
> > > @@ -915,8 +921,7 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
> > >         addr = dma->addr - sizeof(*dma) + offset;
> > >
> > >         sg_init_table(rq->sg, 1);
> > > -       rq->sg[0].dma_address = addr;
> > > -       rq->sg[0].length = len;
> > > +       sg_fill_dma(rq->sg, addr, len);
> > >  }
> > >
> > >  static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
> > > @@ -1068,12 +1073,6 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
> > >         }
> > >  }
> > >
> > > -static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > > -{
> > > -       sg->dma_address = addr;
> > > -       sg->length = len;
> > > -}
> > > -
> > >  static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
> > >                                    struct receive_queue *rq, void *buf, u32 len)
> > >  {
> > > @@ -1354,7 +1353,8 @@ static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue
> > >                 sg_init_table(rq->sg, 1);
> > >                 sg_fill_dma(rq->sg, addr, len);
> > >
> > > -               err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp);
> > > +               err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, xsk_buffs[i],
> > > +                                                   NULL, true, gfp);
> > >                 if (err)
> > >                         goto err;
> > >         }
> > > @@ -2431,7 +2431,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
> > >
> > >         virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
> > >
> > > -       err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
> > > +       err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx,
> > > +                                           rq->do_dma, gfp);
> > >         if (err < 0) {
> > >                 if (rq->do_dma)
> > >                         virtnet_rq_unmap(rq, buf, 0);
> > > @@ -2546,7 +2547,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
> > >         virtnet_rq_init_one_sg(rq, buf, len);
> > >
> > >         ctx = mergeable_len_to_ctx(len + room, headroom);
> > > -       err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
> > > +       err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx,
> > > +                                           rq->do_dma, gfp);
> > >         if (err < 0) {
> > >                 if (rq->do_dma)
> > >                         virtnet_rq_unmap(rq, buf, 0);
> > > --
> > > 2.32.0.3.g01195cf9f
> > >
> >
>
Xuan Zhuo Nov. 6, 2024, 5:55 a.m. UTC | #4
On Wed, 6 Nov 2024 09:56:55 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Tue, Nov 5, 2024 at 3:23 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Tue, 5 Nov 2024 11:23:50 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Wed, Oct 30, 2024 at 4:25 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > virtio-net rq submits premapped per-buffer by setting sg page to NULL;
> > > >
> > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > ---
> > > >  drivers/net/virtio_net.c | 24 +++++++++++++-----------
> > > >  1 file changed, 13 insertions(+), 11 deletions(-)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index 792e9eadbfc3..09757fa408bd 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -542,6 +542,12 @@ static struct sk_buff *ptr_to_skb(void *ptr)
> > > >         return (struct sk_buff *)((unsigned long)ptr & ~VIRTIO_ORPHAN_FLAG);
> > > >  }
> > > >
> > > > +static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > > > +{
> > > > +       sg->dma_address = addr;
> > > > +       sg->length = len;
> > >
> > > This may work but I think it's better to reuse existing dma sg helpers like:
> > >
> > > sg_dma_address(sg) = addr;
> > > sg_dma_length(sg) = len;
> > >
> > > And we probably need to fix the virtio core which only uses
> > > sg_dma_address() but not sg_dma_length().
> > >
> > > This helps us to avoid future issues when CONFIG_NEED_SG_DMA_LENGTH is set.
> >
> >
> > I don't think so.
> >
> > For no-premapped mode, we pass the sg as no-dma sg to virtio core,
> > so the virtio core uses the sg->length directly.
>
> This is fine.
>
> > If virtio core do dma map for sg, we do not use the dma_map_sg_attrs(),
> > so we must use sg->length directly.
>
> I meant it's a hack. It may work now but will be a bug in the future.
>
> For example, I'm playing a prototype to do pre mapping for virtio-blk,
> the idea is to move the expensive DMA mappings in the case of swiotlb
> etc to be done outside the pre virtqueue lock. In that case, the
> driver may want to use dma_map_sg() instead of dma_map_page().
>
> I'd suppose we will finally go with the way where DMA mappings needs
> to be handled by the driver, and dma_map_sg() is faster than per sg
> dma_map_page() anyhow.
>
> >
> > In this case, for the driver, we can not use sg_dma_length(),
> > if CONFIG_NEED_SG_DMA_LENGTH is set, sg_dma_length() will set sg->dma_length,
> > but virtio core use sg->length.
>
> Well, we just need a minor tweak to get the length from
> vring_map_one_sg(), then everything should be fine?
>
> if (sg_is_premapped) {
>       *addr = sg_dma_address(sg);
>       *len = sg_dma_len(sg);
> }

For now, let us start from:

 if (sg_is_premapped) {
       *addr = sg_dma_address(sg);
       sg->length = sg_dma_len(sg);
 }

Then the virtio core needs to be refactored to use dma_map_sg() in the future.

Thanks.


>
> >
> > For sg->dma_address, it is ok for me to use sg_dma_address or not.
> > But for consistency to sg->length, I use the sg->dma_address directly.
> >
> > I noticed this is special, so I put them into an independent function.
> >
> > Thanks.
>
> Actually, the code like sg_fill_dma() calls for a virtqueue dma
> mapping helper, I think we've agreed that core needs to hide DMA
> details from the driver.  That is something like
> virtqueue_dma_map_sg() etc.
>
> Thanks
>
> >
> > >
> > > Others look good.
> > >
> > > Thanks
> > >
> > > > +}
> > > > +
> > > >  static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
> > > >                             bool in_napi, struct virtnet_sq_free_stats *stats)
> > > >  {
> > > > @@ -915,8 +921,7 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
> > > >         addr = dma->addr - sizeof(*dma) + offset;
> > > >
> > > >         sg_init_table(rq->sg, 1);
> > > > -       rq->sg[0].dma_address = addr;
> > > > -       rq->sg[0].length = len;
> > > > +       sg_fill_dma(rq->sg, addr, len);
> > > >  }
> > > >
> > > >  static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
> > > > @@ -1068,12 +1073,6 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
> > > >         }
> > > >  }
> > > >
> > > > -static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > > > -{
> > > > -       sg->dma_address = addr;
> > > > -       sg->length = len;
> > > > -}
> > > > -
> > > >  static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
> > > >                                    struct receive_queue *rq, void *buf, u32 len)
> > > >  {
> > > > @@ -1354,7 +1353,8 @@ static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue
> > > >                 sg_init_table(rq->sg, 1);
> > > >                 sg_fill_dma(rq->sg, addr, len);
> > > >
> > > > -               err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp);
> > > > +               err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, xsk_buffs[i],
> > > > +                                                   NULL, true, gfp);
> > > >                 if (err)
> > > >                         goto err;
> > > >         }
> > > > @@ -2431,7 +2431,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
> > > >
> > > >         virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
> > > >
> > > > -       err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
> > > > +       err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx,
> > > > +                                           rq->do_dma, gfp);
> > > >         if (err < 0) {
> > > >                 if (rq->do_dma)
> > > >                         virtnet_rq_unmap(rq, buf, 0);
> > > > @@ -2546,7 +2547,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
> > > >         virtnet_rq_init_one_sg(rq, buf, len);
> > > >
> > > >         ctx = mergeable_len_to_ctx(len + room, headroom);
> > > > -       err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
> > > > +       err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx,
> > > > +                                           rq->do_dma, gfp);
> > > >         if (err < 0) {
> > > >                 if (rq->do_dma)
> > > >                         virtnet_rq_unmap(rq, buf, 0);
> > > > --
> > > > 2.32.0.3.g01195cf9f
> > > >
> > >
> >
>
diff mbox series

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 792e9eadbfc3..09757fa408bd 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -542,6 +542,12 @@  static struct sk_buff *ptr_to_skb(void *ptr)
 	return (struct sk_buff *)((unsigned long)ptr & ~VIRTIO_ORPHAN_FLAG);
 }
 
+static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
+{
+	sg->dma_address = addr;
+	sg->length = len;
+}
+
 static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
 			    bool in_napi, struct virtnet_sq_free_stats *stats)
 {
@@ -915,8 +921,7 @@  static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
 	addr = dma->addr - sizeof(*dma) + offset;
 
 	sg_init_table(rq->sg, 1);
-	rq->sg[0].dma_address = addr;
-	rq->sg[0].length = len;
+	sg_fill_dma(rq->sg, addr, len);
 }
 
 static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
@@ -1068,12 +1073,6 @@  static void check_sq_full_and_disable(struct virtnet_info *vi,
 	}
 }
 
-static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
-{
-	sg->dma_address = addr;
-	sg->length = len;
-}
-
 static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
 				   struct receive_queue *rq, void *buf, u32 len)
 {
@@ -1354,7 +1353,8 @@  static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue
 		sg_init_table(rq->sg, 1);
 		sg_fill_dma(rq->sg, addr, len);
 
-		err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp);
+		err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, xsk_buffs[i],
+						    NULL, true, gfp);
 		if (err)
 			goto err;
 	}
@@ -2431,7 +2431,8 @@  static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 
 	virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
 
-	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
+	err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx,
+					    rq->do_dma, gfp);
 	if (err < 0) {
 		if (rq->do_dma)
 			virtnet_rq_unmap(rq, buf, 0);
@@ -2546,7 +2547,8 @@  static int add_recvbuf_mergeable(struct virtnet_info *vi,
 	virtnet_rq_init_one_sg(rq, buf, len);
 
 	ctx = mergeable_len_to_ctx(len + room, headroom);
-	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
+	err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx,
+					    rq->do_dma, gfp);
 	if (err < 0) {
 		if (rq->do_dma)
 			virtnet_rq_unmap(rq, buf, 0);