[net-next,v6,08/10] virtio_net: xsk: rx: support recv small mode

Message ID: 20240618075643.24867-9-xuanzhuo@linux.alibaba.com (mailing list archive)
State: Changes Requested
Delegated to: Netdev Maintainers
Series: virtio-net: support AF_XDP zero copy

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 845 this patch: 845
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 14 of 14 maintainers
netdev/build_clang success Errors and warnings before: 849 this patch: 849
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 849 this patch: 849
netdev/checkpatch warning WARNING: line length of 83 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 96 exceeds 80 columns WARNING: line length of 97 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Xuan Zhuo June 18, 2024, 7:56 a.m. UTC
In the xsk receive process:
1. We may need to copy data to create an skb for XDP_PASS.
2. We may need to call xsk_buff_free() to release the buffer.
3. The handle for the xdp_buff is different from the normal buffer.

If we pushed this logic into the existing receive handlers (merge and
small), we would have to maintain code scattered across the merge and
small (and big) paths. So I think it is a good choice for us to put
the xsk code into an independent function.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 135 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 133 insertions(+), 2 deletions(-)

Comments

Jason Wang June 28, 2024, 2:19 a.m. UTC | #1
On Tue, Jun 18, 2024 at 3:57 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> In the xsk receive process:
> 1. We may need to copy data to create an skb for XDP_PASS.
> 2. We may need to call xsk_buff_free() to release the buffer.
> 3. The handle for the xdp_buff is different from the normal buffer.
>
> If we pushed this logic into the existing receive handlers (merge and
> small), we would have to maintain code scattered across the merge and
> small (and big) paths. So I think it is a good choice for us to put
> the xsk code into an independent function.

I think it's better to try to reuse the existing functions.

More below:

>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
>  drivers/net/virtio_net.c | 135 ++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 133 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 2ac5668a94ce..06608d696e2e 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -500,6 +500,10 @@ struct virtio_net_common_hdr {
>  };
>
>  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> +                              struct net_device *dev,
> +                              unsigned int *xdp_xmit,
> +                              struct virtnet_rq_stats *stats);
>
>  static bool is_xdp_frame(void *ptr)
>  {
> @@ -1040,6 +1044,120 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
>         sg->length = len;
>  }
>
> +static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
> +                                  struct receive_queue *rq, void *buf, u32 len)
> +{
> +       struct xdp_buff *xdp;
> +       u32 bufsize;
> +
> +       xdp = (struct xdp_buff *)buf;
> +
> +       bufsize = xsk_pool_get_rx_frame_size(rq->xsk.pool) + vi->hdr_len;
> +
> +       if (unlikely(len > bufsize)) {
> +               pr_debug("%s: rx error: len %u exceeds truesize %u\n",
> +                        vi->dev->name, len, bufsize);
> +               DEV_STATS_INC(vi->dev, rx_length_errors);
> +               xsk_buff_free(xdp);
> +               return NULL;
> +       }
> +
> +       xsk_buff_set_size(xdp, len);
> +       xsk_buff_dma_sync_for_cpu(xdp);
> +
> +       return xdp;
> +}
> +
> +static struct sk_buff *xdp_construct_skb(struct receive_queue *rq,
> +                                        struct xdp_buff *xdp)
> +{

So we have a similar caller which is receive_small_build_skb(). Any
chance to reuse that?

> +       unsigned int metasize = xdp->data - xdp->data_meta;
> +       struct sk_buff *skb;
> +       unsigned int size;
> +
> +       size = xdp->data_end - xdp->data_hard_start;
> +       skb = napi_alloc_skb(&rq->napi, size);
> +       if (unlikely(!skb)) {
> +               xsk_buff_free(xdp);
> +               return NULL;
> +       }
> +
> +       skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
> +
> +       size = xdp->data_end - xdp->data_meta;
> +       memcpy(__skb_put(skb, size), xdp->data_meta, size);
> +
> +       if (metasize) {
> +               __skb_pull(skb, metasize);
> +               skb_metadata_set(skb, metasize);
> +       }
> +
> +       xsk_buff_free(xdp);
> +
> +       return skb;
> +}
> +
> +static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
> +                                                struct receive_queue *rq, struct xdp_buff *xdp,
> +                                                unsigned int *xdp_xmit,
> +                                                struct virtnet_rq_stats *stats)
> +{
> +       struct bpf_prog *prog;
> +       u32 ret;
> +
> +       ret = XDP_PASS;
> +       rcu_read_lock();
> +       prog = rcu_dereference(rq->xdp_prog);
> +       if (prog)
> +               ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
> +       rcu_read_unlock();
> +
> +       switch (ret) {
> +       case XDP_PASS:
> +               return xdp_construct_skb(rq, xdp);
> +
> +       case XDP_TX:
> +       case XDP_REDIRECT:
> +               return NULL;
> +
> +       default:
> +               /* drop packet */
> +               xsk_buff_free(xdp);
> +               u64_stats_inc(&stats->drops);
> +               return NULL;
> +       }
> +}
> +
> +static struct sk_buff *virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
> +                                              void *buf, u32 len,
> +                                              unsigned int *xdp_xmit,
> +                                              struct virtnet_rq_stats *stats)
> +{
> +       struct net_device *dev = vi->dev;
> +       struct sk_buff *skb = NULL;
> +       struct xdp_buff *xdp;
> +
> +       len -= vi->hdr_len;
> +
> +       u64_stats_add(&stats->bytes, len);
> +
> +       xdp = buf_to_xdp(vi, rq, buf, len);
> +       if (!xdp)
> +               return NULL;
> +
> +       if (unlikely(len < ETH_HLEN)) {
> +               pr_debug("%s: short packet %i\n", dev->name, len);
> +               DEV_STATS_INC(dev, rx_length_errors);
> +               xsk_buff_free(xdp);
> +               return NULL;
> +       }
> +
> +       if (!vi->mergeable_rx_bufs)
> +               skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
> +
> +       return skb;
> +}
> +
>  static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
>                                    struct xsk_buff_pool *pool, gfp_t gfp)
>  {
> @@ -2363,9 +2481,22 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
>         void *buf;
>         int i;
>
> -       if (!vi->big_packets || vi->mergeable_rx_bufs) {
> -               void *ctx;
> +       if (rq->xsk.pool) {
> +               struct sk_buff *skb;
> +
> +               while (packets < budget) {
> +                       buf = virtqueue_get_buf(rq->vq, &len);
> +                       if (!buf)
> +                               break;
>
> +                       skb = virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, &stats);
> +                       if (skb)
> +                               virtnet_receive_done(vi, rq, skb);
> +
> +                       packets++;
> +               }

If reusing turns out to be hard, I'd rather add new paths in receive_small().

> +       } else if (!vi->big_packets || vi->mergeable_rx_bufs) {
> +               void *ctx;
>                 while (packets < budget &&
>                        (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
>                         receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
> --
> 2.32.0.3.g01195cf9f
>

Thanks
Xuan Zhuo June 28, 2024, 5:44 a.m. UTC | #2
On Fri, 28 Jun 2024 10:19:41 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Tue, Jun 18, 2024 at 3:57 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > In the xsk receive process:
> > 1. We may need to copy data to create an skb for XDP_PASS.
> > 2. We may need to call xsk_buff_free() to release the buffer.
> > 3. The handle for the xdp_buff is different from the normal buffer.
> >
> > If we pushed this logic into the existing receive handlers (merge and
> > small), we would have to maintain code scattered across the merge and
> > small (and big) paths. So I think it is a good choice for us to put
> > the xsk code into an independent function.
>
> I think it's better to try to reuse the existing functions.
>
> More below:
>
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >  drivers/net/virtio_net.c | 135 ++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 133 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 2ac5668a94ce..06608d696e2e 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -500,6 +500,10 @@ struct virtio_net_common_hdr {
> >  };
> >
> >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > +                              struct net_device *dev,
> > +                              unsigned int *xdp_xmit,
> > +                              struct virtnet_rq_stats *stats);
> >
> >  static bool is_xdp_frame(void *ptr)
> >  {
> > @@ -1040,6 +1044,120 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> >         sg->length = len;
> >  }
> >
> > +static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
> > +                                  struct receive_queue *rq, void *buf, u32 len)
> > +{
> > +       struct xdp_buff *xdp;
> > +       u32 bufsize;
> > +
> > +       xdp = (struct xdp_buff *)buf;
> > +
> > +       bufsize = xsk_pool_get_rx_frame_size(rq->xsk.pool) + vi->hdr_len;
> > +
> > +       if (unlikely(len > bufsize)) {
> > +               pr_debug("%s: rx error: len %u exceeds truesize %u\n",
> > +                        vi->dev->name, len, bufsize);
> > +               DEV_STATS_INC(vi->dev, rx_length_errors);
> > +               xsk_buff_free(xdp);
> > +               return NULL;
> > +       }
> > +
> > +       xsk_buff_set_size(xdp, len);
> > +       xsk_buff_dma_sync_for_cpu(xdp);
> > +
> > +       return xdp;
> > +}
> > +
> > +static struct sk_buff *xdp_construct_skb(struct receive_queue *rq,
> > +                                        struct xdp_buff *xdp)
> > +{
>
> So we have a similar caller which is receive_small_build_skb(). Any
> chance to reuse that?

receive_small_build_skb works with build_skb.

Here we need to copy the packet from the xsk buffer to the skb buffer.
So I do not think we can reuse it.
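
In other words, the two approaches differ roughly like this (a
simplified sketch, not the actual driver code; buf, truesize, headroom
and len stand in for values the driver computes):

        /* Small mode: the receive buffer stays with the skb, so the
         * skb is built around the existing page with no copy.
         */
        skb = build_skb(buf, truesize);
        skb_reserve(skb, headroom);
        skb_put(skb, len);

        /* xsk mode: the frame lives in userspace-owned umem and must
         * be returned to the pool, so the payload is copied into a
         * freshly allocated skb first.
         */
        skb = napi_alloc_skb(&rq->napi, len);
        memcpy(__skb_put(skb, len), xdp->data, len);
        xsk_buff_free(xdp);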


>
> > +       unsigned int metasize = xdp->data - xdp->data_meta;
> > +       struct sk_buff *skb;
> > +       unsigned int size;
> > +
> > +       size = xdp->data_end - xdp->data_hard_start;
> > +       skb = napi_alloc_skb(&rq->napi, size);
> > +       if (unlikely(!skb)) {
> > +               xsk_buff_free(xdp);
> > +               return NULL;
> > +       }
> > +
> > +       skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
> > +
> > +       size = xdp->data_end - xdp->data_meta;
> > +       memcpy(__skb_put(skb, size), xdp->data_meta, size);
> > +
> > +       if (metasize) {
> > +               __skb_pull(skb, metasize);
> > +               skb_metadata_set(skb, metasize);
> > +       }
> > +
> > +       xsk_buff_free(xdp);
> > +
> > +       return skb;
> > +}
> > +
> > +static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
> > +                                                struct receive_queue *rq, struct xdp_buff *xdp,
> > +                                                unsigned int *xdp_xmit,
> > +                                                struct virtnet_rq_stats *stats)
> > +{
> > +       struct bpf_prog *prog;
> > +       u32 ret;
> > +
> > +       ret = XDP_PASS;
> > +       rcu_read_lock();
> > +       prog = rcu_dereference(rq->xdp_prog);
> > +       if (prog)
> > +               ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
> > +       rcu_read_unlock();
> > +
> > +       switch (ret) {
> > +       case XDP_PASS:
> > +               return xdp_construct_skb(rq, xdp);
> > +
> > +       case XDP_TX:
> > +       case XDP_REDIRECT:
> > +               return NULL;
> > +
> > +       default:
> > +               /* drop packet */
> > +               xsk_buff_free(xdp);
> > +               u64_stats_inc(&stats->drops);
> > +               return NULL;
> > +       }
> > +}
> > +
> > +static struct sk_buff *virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
> > +                                              void *buf, u32 len,
> > +                                              unsigned int *xdp_xmit,
> > +                                              struct virtnet_rq_stats *stats)
> > +{
> > +       struct net_device *dev = vi->dev;
> > +       struct sk_buff *skb = NULL;
> > +       struct xdp_buff *xdp;
> > +
> > +       len -= vi->hdr_len;
> > +
> > +       u64_stats_add(&stats->bytes, len);
> > +
> > +       xdp = buf_to_xdp(vi, rq, buf, len);
> > +       if (!xdp)
> > +               return NULL;
> > +
> > +       if (unlikely(len < ETH_HLEN)) {
> > +               pr_debug("%s: short packet %i\n", dev->name, len);
> > +               DEV_STATS_INC(dev, rx_length_errors);
> > +               xsk_buff_free(xdp);
> > +               return NULL;
> > +       }
> > +
> > +       if (!vi->mergeable_rx_bufs)
> > +               skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
> > +
> > +       return skb;
> > +}
> > +
> >  static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> >                                    struct xsk_buff_pool *pool, gfp_t gfp)
> >  {
> > @@ -2363,9 +2481,22 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
> >         void *buf;
> >         int i;
> >
> > -       if (!vi->big_packets || vi->mergeable_rx_bufs) {
> > -               void *ctx;
> > +       if (rq->xsk.pool) {
> > +               struct sk_buff *skb;
> > +
> > +               while (packets < budget) {
> > +                       buf = virtqueue_get_buf(rq->vq, &len);
> > +                       if (!buf)
> > +                               break;
> >
> > +                       skb = virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, &stats);
> > +                       if (skb)
> > +                               virtnet_receive_done(vi, rq, skb);
> > +
> > +                       packets++;
> > +               }
>
> If reusing turns out to be hard, I'd rather add new paths in receive_small().

The existing functions are called after virtnet_rq_get_buf(), which does a
DMA unmap. But for xsk, the DMA unmap is not needed, so the xsk receive
handler should use virtqueue_get_buf() directly.
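
Roughly, the two retrieval paths look like this (a sketch based on the
hunks above, not new code):

        /* Normal path: the driver mapped the buffer itself, so the
         * wrapper unmaps it before the buffer is handed up.
         */
        buf = virtnet_rq_get_buf(rq, &len, &ctx);

        /* xsk path: the buffer belongs to the xsk pool, whose DMA
         * mappings are managed by xsk_buff_pool, so the raw virtqueue
         * API is enough; the returned token is the xdp_buff itself.
         */
        buf = virtqueue_get_buf(rq->vq, &len);
        xdp = (struct xdp_buff *)buf;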

Thanks.

>
> > +       } else if (!vi->big_packets || vi->mergeable_rx_bufs) {
> > +               void *ctx;
> >                 while (packets < budget &&
> >                        (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
> >                         receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
> > --
> > 2.32.0.3.g01195cf9f
> >
>
> Thanks
>
Jason Wang July 1, 2024, 3:20 a.m. UTC | #3
On Fri, Jun 28, 2024 at 1:48 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Fri, 28 Jun 2024 10:19:41 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Tue, Jun 18, 2024 at 3:57 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > In the xsk receive process:
> > > 1. We may need to copy data to create an skb for XDP_PASS.
> > > 2. We may need to call xsk_buff_free() to release the buffer.
> > > 3. The handle for the xdp_buff is different from the normal buffer.
> > >
> > > If we pushed this logic into the existing receive handlers (merge and
> > > small), we would have to maintain code scattered across the merge and
> > > small (and big) paths. So I think it is a good choice for us to put
> > > the xsk code into an independent function.
> >
> > I think it's better to try to reuse the existing functions.
> >
> > More below:
> >
> > >
> > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > ---
> > >  drivers/net/virtio_net.c | 135 ++++++++++++++++++++++++++++++++++++++-
> > >  1 file changed, 133 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index 2ac5668a94ce..06608d696e2e 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -500,6 +500,10 @@ struct virtio_net_common_hdr {
> > >  };
> > >
> > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > > +                              struct net_device *dev,
> > > +                              unsigned int *xdp_xmit,
> > > +                              struct virtnet_rq_stats *stats);
> > >
> > >  static bool is_xdp_frame(void *ptr)
> > >  {
> > > @@ -1040,6 +1044,120 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > >         sg->length = len;
> > >  }
> > >
> > > +static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
> > > +                                  struct receive_queue *rq, void *buf, u32 len)
> > > +{
> > > +       struct xdp_buff *xdp;
> > > +       u32 bufsize;
> > > +
> > > +       xdp = (struct xdp_buff *)buf;
> > > +
> > > +       bufsize = xsk_pool_get_rx_frame_size(rq->xsk.pool) + vi->hdr_len;
> > > +
> > > +       if (unlikely(len > bufsize)) {
> > > +               pr_debug("%s: rx error: len %u exceeds truesize %u\n",
> > > +                        vi->dev->name, len, bufsize);
> > > +               DEV_STATS_INC(vi->dev, rx_length_errors);
> > > +               xsk_buff_free(xdp);
> > > +               return NULL;
> > > +       }
> > > +
> > > +       xsk_buff_set_size(xdp, len);
> > > +       xsk_buff_dma_sync_for_cpu(xdp);
> > > +
> > > +       return xdp;
> > > +}
> > > +
> > > +static struct sk_buff *xdp_construct_skb(struct receive_queue *rq,
> > > +                                        struct xdp_buff *xdp)
> > > +{
> >
> > So we have a similar caller which is receive_small_build_skb(). Any
> > chance to reuse that?
>
> receive_small_build_skb works with build_skb.

Right.

>
> Here we need to copy the packet from the xsk buffer to the skb buffer.
> So I do not think we can reuse it.
>

Let's rename this to xsk_construct_skb()?

>
> >
> > > +       unsigned int metasize = xdp->data - xdp->data_meta;
> > > +       struct sk_buff *skb;
> > > +       unsigned int size;
> > > +
> > > +       size = xdp->data_end - xdp->data_hard_start;
> > > +       skb = napi_alloc_skb(&rq->napi, size);
> > > +       if (unlikely(!skb)) {
> > > +               xsk_buff_free(xdp);
> > > +               return NULL;
> > > +       }
> > > +
> > > +       skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
> > > +
> > > +       size = xdp->data_end - xdp->data_meta;
> > > +       memcpy(__skb_put(skb, size), xdp->data_meta, size);
> > > +
> > > +       if (metasize) {
> > > +               __skb_pull(skb, metasize);
> > > +               skb_metadata_set(skb, metasize);
> > > +       }
> > > +
> > > +       xsk_buff_free(xdp);
> > > +
> > > +       return skb;
> > > +}
> > > +
> > > +static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
> > > +                                                struct receive_queue *rq, struct xdp_buff *xdp,
> > > +                                                unsigned int *xdp_xmit,
> > > +                                                struct virtnet_rq_stats *stats)
> > > +{
> > > +       struct bpf_prog *prog;
> > > +       u32 ret;
> > > +
> > > +       ret = XDP_PASS;
> > > +       rcu_read_lock();
> > > +       prog = rcu_dereference(rq->xdp_prog);
> > > +       if (prog)
> > > +               ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
> > > +       rcu_read_unlock();
> > > +
> > > +       switch (ret) {
> > > +       case XDP_PASS:
> > > +               return xdp_construct_skb(rq, xdp);
> > > +
> > > +       case XDP_TX:
> > > +       case XDP_REDIRECT:
> > > +               return NULL;
> > > +
> > > +       default:
> > > +               /* drop packet */
> > > +               xsk_buff_free(xdp);
> > > +               u64_stats_inc(&stats->drops);
> > > +               return NULL;
> > > +       }
> > > +}
> > > +
> > > +static struct sk_buff *virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
> > > +                                              void *buf, u32 len,
> > > +                                              unsigned int *xdp_xmit,
> > > +                                              struct virtnet_rq_stats *stats)
> > > +{
> > > +       struct net_device *dev = vi->dev;
> > > +       struct sk_buff *skb = NULL;
> > > +       struct xdp_buff *xdp;
> > > +
> > > +       len -= vi->hdr_len;
> > > +
> > > +       u64_stats_add(&stats->bytes, len);
> > > +
> > > +       xdp = buf_to_xdp(vi, rq, buf, len);
> > > +       if (!xdp)
> > > +               return NULL;
> > > +
> > > +       if (unlikely(len < ETH_HLEN)) {
> > > +               pr_debug("%s: short packet %i\n", dev->name, len);
> > > +               DEV_STATS_INC(dev, rx_length_errors);
> > > +               xsk_buff_free(xdp);
> > > +               return NULL;
> > > +       }
> > > +
> > > +       if (!vi->mergeable_rx_bufs)
> > > +               skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
> > > +
> > > +       return skb;
> > > +}
> > > +
> > >  static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> > >                                    struct xsk_buff_pool *pool, gfp_t gfp)
> > >  {
> > > @@ -2363,9 +2481,22 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
> > >         void *buf;
> > >         int i;
> > >
> > > -       if (!vi->big_packets || vi->mergeable_rx_bufs) {
> > > -               void *ctx;
> > > +       if (rq->xsk.pool) {
> > > +               struct sk_buff *skb;
> > > +
> > > +               while (packets < budget) {
> > > +                       buf = virtqueue_get_buf(rq->vq, &len);
> > > +                       if (!buf)
> > > +                               break;
> > >
> > > +                       skb = virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, &stats);
> > > +                       if (skb)
> > > +                               virtnet_receive_done(vi, rq, skb);
> > > +
> > > +                       packets++;
> > > +               }
> >
> > If reusing turns out to be hard, I'd rather add new paths in receive_small().
>
> The existing functions are called after virtnet_rq_get_buf(), which does a
> DMA unmap. But for xsk, the DMA unmap is not needed, so the xsk receive
> handler should use virtqueue_get_buf() directly.

Probably, but if it's just virtnet_rq_get_buf(), we can simply do:

static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
{
        void *buf;

        buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
        if (buf && !rq->xsk.pool)
                virtnet_rq_unmap(rq, buf, *len);

        return buf;
}

Thanks

>
> Thanks.
>
> >
> > > +       } else if (!vi->big_packets || vi->mergeable_rx_bufs) {
> > > +               void *ctx;
> > >                 while (packets < budget &&
> > >                        (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
> > >                         receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
> > > --
> > > 2.32.0.3.g01195cf9f
> > >
> >
> > Thanks
> >
>
Jason Wang July 1, 2024, 3:25 a.m. UTC | #4
On Mon, Jul 1, 2024 at 11:20 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Fri, Jun 28, 2024 at 1:48 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Fri, 28 Jun 2024 10:19:41 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Tue, Jun 18, 2024 at 3:57 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > In the xsk receive process:
> > > > 1. We may need to copy data to create an skb for XDP_PASS.
> > > > 2. We may need to call xsk_buff_free() to release the buffer.
> > > > 3. The handle for the xdp_buff is different from the normal buffer.
> > > >
> > > > If we pushed this logic into the existing receive handlers (merge and
> > > > small), we would have to maintain code scattered across the merge and
> > > > small (and big) paths. So I think it is a good choice for us to put
> > > > the xsk code into an independent function.
> > >
> > > I think it's better to try to reuse the existing functions.
> > >
> > > More below:
> > >
> > > >
> > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > ---
> > > >  drivers/net/virtio_net.c | 135 ++++++++++++++++++++++++++++++++++++++-
> > > >  1 file changed, 133 insertions(+), 2 deletions(-)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index 2ac5668a94ce..06608d696e2e 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -500,6 +500,10 @@ struct virtio_net_common_hdr {
> > > >  };
> > > >
> > > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > > > +                              struct net_device *dev,
> > > > +                              unsigned int *xdp_xmit,
> > > > +                              struct virtnet_rq_stats *stats);
> > > >
> > > >  static bool is_xdp_frame(void *ptr)
> > > >  {
> > > > @@ -1040,6 +1044,120 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > > >         sg->length = len;
> > > >  }
> > > >
> > > > +static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
> > > > +                                  struct receive_queue *rq, void *buf, u32 len)
> > > > +{
> > > > +       struct xdp_buff *xdp;
> > > > +       u32 bufsize;
> > > > +
> > > > +       xdp = (struct xdp_buff *)buf;
> > > > +
> > > > +       bufsize = xsk_pool_get_rx_frame_size(rq->xsk.pool) + vi->hdr_len;
> > > > +
> > > > +       if (unlikely(len > bufsize)) {
> > > > +               pr_debug("%s: rx error: len %u exceeds truesize %u\n",
> > > > +                        vi->dev->name, len, bufsize);
> > > > +               DEV_STATS_INC(vi->dev, rx_length_errors);
> > > > +               xsk_buff_free(xdp);
> > > > +               return NULL;
> > > > +       }
> > > > +
> > > > +       xsk_buff_set_size(xdp, len);
> > > > +       xsk_buff_dma_sync_for_cpu(xdp);
> > > > +
> > > > +       return xdp;
> > > > +}
> > > > +
> > > > +static struct sk_buff *xdp_construct_skb(struct receive_queue *rq,
> > > > +                                        struct xdp_buff *xdp)
> > > > +{
> > >
> > > So we have a similar caller which is receive_small_build_skb(). Any
> > > chance to reuse that?
> >
> > receive_small_build_skb works with build_skb.
>
> Right.
>
> >
> > Here we need to copy the packet from the xsk buffer to the skb buffer.
> > So I do not think we can reuse it.
> >
>
> Let's rename this to xsk_construct_skb()?
>
> >
> > >
> > > > +       unsigned int metasize = xdp->data - xdp->data_meta;
> > > > +       struct sk_buff *skb;
> > > > +       unsigned int size;
> > > > +
> > > > +       size = xdp->data_end - xdp->data_hard_start;
> > > > +       skb = napi_alloc_skb(&rq->napi, size);
> > > > +       if (unlikely(!skb)) {
> > > > +               xsk_buff_free(xdp);
> > > > +               return NULL;
> > > > +       }
> > > > +
> > > > +       skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
> > > > +
> > > > +       size = xdp->data_end - xdp->data_meta;
> > > > +       memcpy(__skb_put(skb, size), xdp->data_meta, size);
> > > > +
> > > > +       if (metasize) {
> > > > +               __skb_pull(skb, metasize);
> > > > +               skb_metadata_set(skb, metasize);
> > > > +       }
> > > > +
> > > > +       xsk_buff_free(xdp);
> > > > +
> > > > +       return skb;
> > > > +}
> > > > +
> > > > +static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
> > > > +                                                struct receive_queue *rq, struct xdp_buff *xdp,
> > > > +                                                unsigned int *xdp_xmit,
> > > > +                                                struct virtnet_rq_stats *stats)
> > > > +{
> > > > +       struct bpf_prog *prog;
> > > > +       u32 ret;
> > > > +
> > > > +       ret = XDP_PASS;
> > > > +       rcu_read_lock();
> > > > +       prog = rcu_dereference(rq->xdp_prog);
> > > > +       if (prog)
> > > > +               ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
> > > > +       rcu_read_unlock();
> > > > +
> > > > +       switch (ret) {
> > > > +       case XDP_PASS:
> > > > +               return xdp_construct_skb(rq, xdp);
> > > > +
> > > > +       case XDP_TX:
> > > > +       case XDP_REDIRECT:
> > > > +               return NULL;
> > > > +
> > > > +       default:
> > > > +               /* drop packet */
> > > > +               xsk_buff_free(xdp);
> > > > +               u64_stats_inc(&stats->drops);
> > > > +               return NULL;
> > > > +       }
> > > > +}
> > > > +
> > > > +static struct sk_buff *virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
> > > > +                                              void *buf, u32 len,
> > > > +                                              unsigned int *xdp_xmit,
> > > > +                                              struct virtnet_rq_stats *stats)
> > > > +{
> > > > +       struct net_device *dev = vi->dev;
> > > > +       struct sk_buff *skb = NULL;
> > > > +       struct xdp_buff *xdp;
> > > > +
> > > > +       len -= vi->hdr_len;
> > > > +
> > > > +       u64_stats_add(&stats->bytes, len);
> > > > +
> > > > +       xdp = buf_to_xdp(vi, rq, buf, len);
> > > > +       if (!xdp)
> > > > +               return NULL;
> > > > +
> > > > +       if (unlikely(len < ETH_HLEN)) {
> > > > +               pr_debug("%s: short packet %i\n", dev->name, len);
> > > > +               DEV_STATS_INC(dev, rx_length_errors);
> > > > +               xsk_buff_free(xdp);
> > > > +               return NULL;
> > > > +       }
> > > > +
> > > > +       if (!vi->mergeable_rx_bufs)
> > > > +               skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
> > > > +
> > > > +       return skb;
> > > > +}
> > > > +
> > > >  static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> > > >                                    struct xsk_buff_pool *pool, gfp_t gfp)
> > > >  {
> > > > @@ -2363,9 +2481,22 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
> > > >         void *buf;
> > > >         int i;
> > > >
> > > > -       if (!vi->big_packets || vi->mergeable_rx_bufs) {
> > > > -               void *ctx;
> > > > +       if (rq->xsk.pool) {
> > > > +               struct sk_buff *skb;
> > > > +
> > > > +               while (packets < budget) {
> > > > +                       buf = virtqueue_get_buf(rq->vq, &len);
> > > > +                       if (!buf)
> > > > +                               break;
> > > >
> > > > +                       skb = virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, &stats);
> > > > +                       if (skb)
> > > > +                               virtnet_receive_done(vi, rq, skb);
> > > > +
> > > > +                       packets++;
> > > > +               }
> > >
> > > If reusing turns out to be hard, I'd rather add new paths in receive_small().
> >
> > The existing functions are called after virtnet_rq_get_buf(), which does a
> > DMA unmap. But for xsk, the DMA unmap is not needed, so the xsk receive
> > handler should use virtqueue_get_buf() directly.
>
> Probably, but if it's just virtnet_rq_get_buf(), we can simply do:
>
> static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
> {
>         void *buf;
>
>         buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
>         if (buf && !rq->xsk.pool)
>                 virtnet_rq_unmap(rq, buf, *len);
>
>         return buf;
> }

Or maybe it would be much clearer if we did:

static int virtnet_receive()
{
        if (rq->xsk.pool)
                virtnet_receive_xsk();
        else
                virtnet_receive_xxx();
        ...
}

Thanks

>
> Thanks
>
> >
> > Thanks.
> >
> > >
> > > > +       } else if (!vi->big_packets || vi->mergeable_rx_bufs) {
> > > > +               void *ctx;
> > > >                 while (packets < budget &&
> > > >                        (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
> > > >                         receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
> > > > --
> > > > 2.32.0.3.g01195cf9f
> > > >
> > >
> > > Thanks
> > >
> >
Xuan Zhuo July 1, 2024, 8:32 a.m. UTC | #5
On Mon, 1 Jul 2024 11:25:37 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Mon, Jul 1, 2024 at 11:20 AM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Fri, Jun 28, 2024 at 1:48 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > On Fri, 28 Jun 2024 10:19:41 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > On Tue, Jun 18, 2024 at 3:57 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > >
> > > > > In the xsk receive process:
> > > > > 1. We may need to copy data to create an skb for XDP_PASS.
> > > > > 2. We may need to call xsk_buff_free() to release the buffer.
> > > > > 3. The handle for the xdp_buff is different from the normal buffer.
> > > > >
> > > > > If we pushed this logic into the existing receive handlers (merge and
> > > > > small), we would have to maintain code scattered across the merge and
> > > > > small (and big) paths. So I think it is a good choice for us to put
> > > > > the xsk code into an independent function.
> > > >
> > > > I think it's better to try to reuse the existing functions.
> > > >
> > > > More below:
> > > >
> > > > >
> > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > > ---
> > > > >  drivers/net/virtio_net.c | 135 ++++++++++++++++++++++++++++++++++++++-
> > > > >  1 file changed, 133 insertions(+), 2 deletions(-)
> > > > >
> > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > index 2ac5668a94ce..06608d696e2e 100644
> > > > > --- a/drivers/net/virtio_net.c
> > > > > +++ b/drivers/net/virtio_net.c
> > > > > @@ -500,6 +500,10 @@ struct virtio_net_common_hdr {
> > > > >  };
> > > > >
> > > > >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > > > > +                              struct net_device *dev,
> > > > > +                              unsigned int *xdp_xmit,
> > > > > +                              struct virtnet_rq_stats *stats);
> > > > >
> > > > >  static bool is_xdp_frame(void *ptr)
> > > > >  {
> > > > > @@ -1040,6 +1044,120 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> > > > >         sg->length = len;
> > > > >  }
> > > > >
> > > > > +static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
> > > > > +                                  struct receive_queue *rq, void *buf, u32 len)
> > > > > +{
> > > > > +       struct xdp_buff *xdp;
> > > > > +       u32 bufsize;
> > > > > +
> > > > > +       xdp = (struct xdp_buff *)buf;
> > > > > +
> > > > > +       bufsize = xsk_pool_get_rx_frame_size(rq->xsk.pool) + vi->hdr_len;
> > > > > +
> > > > > +       if (unlikely(len > bufsize)) {
> > > > > +               pr_debug("%s: rx error: len %u exceeds truesize %u\n",
> > > > > +                        vi->dev->name, len, bufsize);
> > > > > +               DEV_STATS_INC(vi->dev, rx_length_errors);
> > > > > +               xsk_buff_free(xdp);
> > > > > +               return NULL;
> > > > > +       }
> > > > > +
> > > > > +       xsk_buff_set_size(xdp, len);
> > > > > +       xsk_buff_dma_sync_for_cpu(xdp);
> > > > > +
> > > > > +       return xdp;
> > > > > +}
> > > > > +
> > > > > +static struct sk_buff *xdp_construct_skb(struct receive_queue *rq,
> > > > > +                                        struct xdp_buff *xdp)
> > > > > +{
> > > >
> > > > So we have a similar caller which is receive_small_build_skb(). Any
> > > > chance to reuse that?
> > >
> > > receive_small_build_skb works with build_skb.
> >
> > Right.
> >
> > >
> > > Here we need to copy the packet from the xsk buffer to the skb buffer.
> > > So I do not think we can reuse it.
> > >
> >
> > Let's rename this to xsk_construct_skb()?
> >
> > >
> > > >
> > > > > +       unsigned int metasize = xdp->data - xdp->data_meta;
> > > > > +       struct sk_buff *skb;
> > > > > +       unsigned int size;
> > > > > +
> > > > > +       size = xdp->data_end - xdp->data_hard_start;
> > > > > +       skb = napi_alloc_skb(&rq->napi, size);
> > > > > +       if (unlikely(!skb)) {
> > > > > +               xsk_buff_free(xdp);
> > > > > +               return NULL;
> > > > > +       }
> > > > > +
> > > > > +       skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
> > > > > +
> > > > > +       size = xdp->data_end - xdp->data_meta;
> > > > > +       memcpy(__skb_put(skb, size), xdp->data_meta, size);
> > > > > +
> > > > > +       if (metasize) {
> > > > > +               __skb_pull(skb, metasize);
> > > > > +               skb_metadata_set(skb, metasize);
> > > > > +       }
> > > > > +
> > > > > +       xsk_buff_free(xdp);
> > > > > +
> > > > > +       return skb;
> > > > > +}
> > > > > +
> > > > > +static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
> > > > > +                                                struct receive_queue *rq, struct xdp_buff *xdp,
> > > > > +                                                unsigned int *xdp_xmit,
> > > > > +                                                struct virtnet_rq_stats *stats)
> > > > > +{
> > > > > +       struct bpf_prog *prog;
> > > > > +       u32 ret;
> > > > > +
> > > > > +       ret = XDP_PASS;
> > > > > +       rcu_read_lock();
> > > > > +       prog = rcu_dereference(rq->xdp_prog);
> > > > > +       if (prog)
> > > > > +               ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
> > > > > +       rcu_read_unlock();
> > > > > +
> > > > > +       switch (ret) {
> > > > > +       case XDP_PASS:
> > > > > +               return xdp_construct_skb(rq, xdp);
> > > > > +
> > > > > +       case XDP_TX:
> > > > > +       case XDP_REDIRECT:
> > > > > +               return NULL;
> > > > > +
> > > > > +       default:
> > > > > +               /* drop packet */
> > > > > +               xsk_buff_free(xdp);
> > > > > +               u64_stats_inc(&stats->drops);
> > > > > +               return NULL;
> > > > > +       }
> > > > > +}
> > > > > +
> > > > > +static struct sk_buff *virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
> > > > > +                                              void *buf, u32 len,
> > > > > +                                              unsigned int *xdp_xmit,
> > > > > +                                              struct virtnet_rq_stats *stats)
> > > > > +{
> > > > > +       struct net_device *dev = vi->dev;
> > > > > +       struct sk_buff *skb = NULL;
> > > > > +       struct xdp_buff *xdp;
> > > > > +
> > > > > +       len -= vi->hdr_len;
> > > > > +
> > > > > +       u64_stats_add(&stats->bytes, len);
> > > > > +
> > > > > +       xdp = buf_to_xdp(vi, rq, buf, len);
> > > > > +       if (!xdp)
> > > > > +               return NULL;
> > > > > +
> > > > > +       if (unlikely(len < ETH_HLEN)) {
> > > > > +               pr_debug("%s: short packet %i\n", dev->name, len);
> > > > > +               DEV_STATS_INC(dev, rx_length_errors);
> > > > > +               xsk_buff_free(xdp);
> > > > > +               return NULL;
> > > > > +       }
> > > > > +
> > > > > +       if (!vi->mergeable_rx_bufs)
> > > > > +               skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
> > > > > +
> > > > > +       return skb;
> > > > > +}
> > > > > +
> > > > >  static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> > > > >                                    struct xsk_buff_pool *pool, gfp_t gfp)
> > > > >  {
> > > > > @@ -2363,9 +2481,22 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
> > > > >         void *buf;
> > > > >         int i;
> > > > >
> > > > > -       if (!vi->big_packets || vi->mergeable_rx_bufs) {
> > > > > -               void *ctx;
> > > > > +       if (rq->xsk.pool) {
> > > > > +               struct sk_buff *skb;
> > > > > +
> > > > > +               while (packets < budget) {
> > > > > +                       buf = virtqueue_get_buf(rq->vq, &len);
> > > > > +                       if (!buf)
> > > > > +                               break;
> > > > >
> > > > > +                       skb = virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, &stats);
> > > > > +                       if (skb)
> > > > > +                               virtnet_receive_done(vi, rq, skb);
> > > > > +
> > > > > +                       packets++;
> > > > > +               }
> > > >
> > > > If reusing turns out to be hard, I'd rather add new paths in receive_small().
> > >
> > > The existing functions are called after virtnet_rq_get_buf(), which does a
> > > DMA unmap. But for xsk, the DMA unmap is not needed, so the xsk receive
> > > handler should use virtqueue_get_buf() directly.
> >
> > Probably, but if it's just virtnet_rq_get_buf(), we can simply do:
> >
> > static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
> > {
> >         void *buf;
> >
> >         buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
> >         if (buf && !rq->xsk.pool)
> >                 virtnet_rq_unmap(rq, buf, *len);
> >
> >         return buf;
> > }
>
> Or maybe it would be much clearer if we did:
>
> static int virtnet_receive()
> {
>         if (rq->xsk.pool)
>                 virtnet_receive_xsk();
>         else
>                 virtnet_receive_xxx();
>         ...
> }

I like this.

Thanks.


>
> Thanks
>
> >
> > Thanks
> >
> > >
> > > Thanks.
> > >
> > > >
> > > > > +       } else if (!vi->big_packets || vi->mergeable_rx_bufs) {
> > > > > +               void *ctx;
> > > > >                 while (packets < budget &&
> > > > >                        (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
> > > > >                         receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
> > > > > --
> > > > > 2.32.0.3.g01195cf9f
> > > > >
> > > >
> > > > Thanks
> > > >
> > >
>

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2ac5668a94ce..06608d696e2e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -500,6 +500,10 @@  struct virtio_net_common_hdr {
 };
 
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+			       struct net_device *dev,
+			       unsigned int *xdp_xmit,
+			       struct virtnet_rq_stats *stats);
 
 static bool is_xdp_frame(void *ptr)
 {
@@ -1040,6 +1044,120 @@  static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
 	sg->length = len;
 }
 
+static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
+				   struct receive_queue *rq, void *buf, u32 len)
+{
+	struct xdp_buff *xdp;
+	u32 bufsize;
+
+	xdp = (struct xdp_buff *)buf;
+
+	bufsize = xsk_pool_get_rx_frame_size(rq->xsk.pool) + vi->hdr_len;
+
+	if (unlikely(len > bufsize)) {
+		pr_debug("%s: rx error: len %u exceeds truesize %u\n",
+			 vi->dev->name, len, bufsize);
+		DEV_STATS_INC(vi->dev, rx_length_errors);
+		xsk_buff_free(xdp);
+		return NULL;
+	}
+
+	xsk_buff_set_size(xdp, len);
+	xsk_buff_dma_sync_for_cpu(xdp);
+
+	return xdp;
+}
+
+static struct sk_buff *xdp_construct_skb(struct receive_queue *rq,
+					 struct xdp_buff *xdp)
+{
+	unsigned int metasize = xdp->data - xdp->data_meta;
+	struct sk_buff *skb;
+	unsigned int size;
+
+	size = xdp->data_end - xdp->data_hard_start;
+	skb = napi_alloc_skb(&rq->napi, size);
+	if (unlikely(!skb)) {
+		xsk_buff_free(xdp);
+		return NULL;
+	}
+
+	skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
+
+	size = xdp->data_end - xdp->data_meta;
+	memcpy(__skb_put(skb, size), xdp->data_meta, size);
+
+	if (metasize) {
+		__skb_pull(skb, metasize);
+		skb_metadata_set(skb, metasize);
+	}
+
+	xsk_buff_free(xdp);
+
+	return skb;
+}
+
+static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
+						 struct receive_queue *rq, struct xdp_buff *xdp,
+						 unsigned int *xdp_xmit,
+						 struct virtnet_rq_stats *stats)
+{
+	struct bpf_prog *prog;
+	u32 ret;
+
+	ret = XDP_PASS;
+	rcu_read_lock();
+	prog = rcu_dereference(rq->xdp_prog);
+	if (prog)
+		ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+	rcu_read_unlock();
+
+	switch (ret) {
+	case XDP_PASS:
+		return xdp_construct_skb(rq, xdp);
+
+	case XDP_TX:
+	case XDP_REDIRECT:
+		return NULL;
+
+	default:
+		/* drop packet */
+		xsk_buff_free(xdp);
+		u64_stats_inc(&stats->drops);
+		return NULL;
+	}
+}
+
+static struct sk_buff *virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
+					       void *buf, u32 len,
+					       unsigned int *xdp_xmit,
+					       struct virtnet_rq_stats *stats)
+{
+	struct net_device *dev = vi->dev;
+	struct sk_buff *skb = NULL;
+	struct xdp_buff *xdp;
+
+	len -= vi->hdr_len;
+
+	u64_stats_add(&stats->bytes, len);
+
+	xdp = buf_to_xdp(vi, rq, buf, len);
+	if (!xdp)
+		return NULL;
+
+	if (unlikely(len < ETH_HLEN)) {
+		pr_debug("%s: short packet %i\n", dev->name, len);
+		DEV_STATS_INC(dev, rx_length_errors);
+		xsk_buff_free(xdp);
+		return NULL;
+	}
+
+	if (!vi->mergeable_rx_bufs)
+		skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
+
+	return skb;
+}
+
 static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
 				   struct xsk_buff_pool *pool, gfp_t gfp)
 {
@@ -2363,9 +2481,22 @@  static int virtnet_receive(struct receive_queue *rq, int budget,
 	void *buf;
 	int i;
 
-	if (!vi->big_packets || vi->mergeable_rx_bufs) {
-		void *ctx;
+	if (rq->xsk.pool) {
+		struct sk_buff *skb;
+
+		while (packets < budget) {
+			buf = virtqueue_get_buf(rq->vq, &len);
+			if (!buf)
+				break;
 
+			skb = virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, &stats);
+			if (skb)
+				virtnet_receive_done(vi, rq, skb);
+
+			packets++;
+		}
+	} else if (!vi->big_packets || vi->mergeable_rx_bufs) {
+		void *ctx;
 		while (packets < budget &&
 		       (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
 			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);