Message ID | 20240614063933.108811-16-xuanzhuo@linux.alibaba.com
---|---
State | Superseded |
Delegated to | Netdev Maintainers
Series | virtio-net: support AF_XDP zero copy
On Fri, Jun 14, 2024 at 2:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> virtnet_xdp_handler() is reused, but:
>
> 1. We need to copy data to create an skb for XDP_PASS.
> 2. We need to call xsk_buff_free() to release the buffer.
> 3. The handling of the xdp_buff is different.
>
> If we pushed this logic into the existing receive handlers (merge and
> small), we would have to maintain code scattered across merge and small
> (and big). So I think it is a good choice for us to put the xsk code
> into an independent function.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
>  drivers/net/virtio_net.c | 142 +++++++++++++++++++++++++++++++++++++--
>  1 file changed, 138 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 4e5645d8bb7d..72c4d2f0c0ea 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -534,8 +534,10 @@ struct virtio_net_common_hdr {
>
>  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
>  static void virtnet_xsk_completed(struct send_queue *sq, int num);
> -static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> -                                  struct xsk_buff_pool *pool, gfp_t gfp);
> +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> +                              struct net_device *dev,
> +                              unsigned int *xdp_xmit,
> +                              struct virtnet_rq_stats *stats);
>
>  enum virtnet_xmit_type {
>         VIRTNET_XMIT_TYPE_SKB,
> @@ -1218,6 +1220,11 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
>
>         rq = &vi->rq[i];
>
> +       if (rq->xsk.pool) {
> +               xsk_buff_free((struct xdp_buff *)buf);
> +               return;
> +       }
> +
>         if (!vi->big_packets || vi->mergeable_rx_bufs)
>                 virtnet_rq_unmap(rq, buf, 0);
>
> @@ -1308,6 +1315,120 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
>         sg->length = len;
>  }
>
> +static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
> +                                  struct receive_queue *rq, void *buf, u32 len)
> +{
> +       struct xdp_buff *xdp;
> +       u32 bufsize;
> +
> +       xdp = (struct xdp_buff *)buf;
> +
> +       bufsize = xsk_pool_get_rx_frame_size(rq->xsk.pool) + vi->hdr_len;
> +
> +       if (unlikely(len > bufsize)) {
> +               pr_debug("%s: rx error: len %u exceeds truesize %u\n",
> +                        vi->dev->name, len, bufsize);
> +               DEV_STATS_INC(vi->dev, rx_length_errors);
> +               xsk_buff_free(xdp);
> +               return NULL;
> +       }
> +
> +       xsk_buff_set_size(xdp, len);
> +       xsk_buff_dma_sync_for_cpu(xdp);
> +
> +       return xdp;
> +}
> +
> +static struct sk_buff *xdp_construct_skb(struct receive_queue *rq,
> +                                        struct xdp_buff *xdp)
> +{
> +       unsigned int metasize = xdp->data - xdp->data_meta;
> +       struct sk_buff *skb;
> +       unsigned int size;
> +
> +       size = xdp->data_end - xdp->data_hard_start;
> +       skb = napi_alloc_skb(&rq->napi, size);
> +       if (unlikely(!skb)) {
> +               xsk_buff_free(xdp);
> +               return NULL;
> +       }
> +
> +       skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
> +
> +       size = xdp->data_end - xdp->data_meta;
> +       memcpy(__skb_put(skb, size), xdp->data_meta, size);
> +
> +       if (metasize) {
> +               __skb_pull(skb, metasize);
> +               skb_metadata_set(skb, metasize);
> +       }
> +
> +       xsk_buff_free(xdp);
> +
> +       return skb;
> +}
> +
> +static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
> +                                                struct receive_queue *rq, struct xdp_buff *xdp,
> +                                                unsigned int *xdp_xmit,
> +                                                struct virtnet_rq_stats *stats)
> +{
> +       struct bpf_prog *prog;
> +       u32 ret;
> +
> +       ret = XDP_PASS;
> +       rcu_read_lock();
> +       prog = rcu_dereference(rq->xdp_prog);
> +       if (prog)
> +               ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
> +       rcu_read_unlock();
> +
> +       switch (ret) {
> +       case XDP_PASS:
> +               return xdp_construct_skb(rq, xdp);
> +
> +       case XDP_TX:
> +       case XDP_REDIRECT:
> +               return NULL;
> +
> +       default:
> +               /* drop packet */
> +               xsk_buff_free(xdp);
> +               u64_stats_inc(&stats->drops);
> +               return NULL;
> +       }
> +}

Let's use a separate patch for this to decouple the new functions from
the refactoring.

Or even use a separate series for rx zero-copy.

> +
> +static struct sk_buff *virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
> +                                              void *buf, u32 len,
> +                                              unsigned int *xdp_xmit,
> +                                              struct virtnet_rq_stats *stats)
> +{
> +       struct net_device *dev = vi->dev;
> +       struct sk_buff *skb = NULL;
> +       struct xdp_buff *xdp;
> +
> +       len -= vi->hdr_len;
> +
> +       u64_stats_add(&stats->bytes, len);
> +
> +       xdp = buf_to_xdp(vi, rq, buf, len);
> +       if (!xdp)
> +               return NULL;

Don't we need to check if XDP is enabled before those operations?

> +
> +       if (unlikely(len < ETH_HLEN)) {
> +               pr_debug("%s: short packet %i\n", dev->name, len);
> +               DEV_STATS_INC(dev, rx_length_errors);
> +               xsk_buff_free(xdp);
> +               return NULL;
> +       }
> +
> +       if (!vi->mergeable_rx_bufs)
> +               skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
> +
> +       return skb;
> +}
> +
>  static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
>                                    struct xsk_buff_pool *pool, gfp_t gfp)
>  {
> @@ -2713,9 +2834,22 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
>         void *buf;
>         int i;
>
> -       if (!vi->big_packets || vi->mergeable_rx_bufs) {
> -               void *ctx;
> +       if (rq->xsk.pool) {
> +               struct sk_buff *skb;
> +
> +               while (packets < budget) {
> +                       buf = virtqueue_get_buf(rq->vq, &len);
> +                       if (!buf)
> +                               break;
>
> +                       skb = virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, &stats);

The function name is confusing; for example, xsk might not even be
enabled.

> +                       if (skb)
> +                               virtnet_receive_done(vi, rq, skb);
> +
> +                       packets++;
> +               }
> +       } else if (!vi->big_packets || vi->mergeable_rx_bufs) {
> +               void *ctx;
>                 while (packets < budget &&
>                        (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
>                         receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
> --
> 2.32.0.3.g01195cf9f
>

Thanks
On Mon, 17 Jun 2024 15:10:48 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Fri, Jun 14, 2024 at 2:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > virtnet_xdp_handler() is reused, but:
> >
> > 1. We need to copy data to create an skb for XDP_PASS.
> > 2. We need to call xsk_buff_free() to release the buffer.
> > 3. The handling of the xdp_buff is different.
> >
> > If we pushed this logic into the existing receive handlers (merge and
> > small), we would have to maintain code scattered across merge and small
> > (and big). So I think it is a good choice for us to put the xsk code
> > into an independent function.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >  drivers/net/virtio_net.c | 142 +++++++++++++++++++++++++++++++++++++--
> >  1 file changed, 138 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 4e5645d8bb7d..72c4d2f0c0ea 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -534,8 +534,10 @@ struct virtio_net_common_hdr {
> >
> >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> >  static void virtnet_xsk_completed(struct send_queue *sq, int num);
> > -static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> > -                                  struct xsk_buff_pool *pool, gfp_t gfp);
> > +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> > +                              struct net_device *dev,
> > +                              unsigned int *xdp_xmit,
> > +                              struct virtnet_rq_stats *stats);
> >
> >  enum virtnet_xmit_type {
> >         VIRTNET_XMIT_TYPE_SKB,
> > @@ -1218,6 +1220,11 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
> >
> >         rq = &vi->rq[i];
> >
> > +       if (rq->xsk.pool) {
> > +               xsk_buff_free((struct xdp_buff *)buf);
> > +               return;
> > +       }
> > +
> >         if (!vi->big_packets || vi->mergeable_rx_bufs)
> >                 virtnet_rq_unmap(rq, buf, 0);
> >
> > @@ -1308,6 +1315,120 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
> >         sg->length = len;
> >  }
> >
> > +static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
> > +                                  struct receive_queue *rq, void *buf, u32 len)
> > +{
> > +       struct xdp_buff *xdp;
> > +       u32 bufsize;
> > +
> > +       xdp = (struct xdp_buff *)buf;
> > +
> > +       bufsize = xsk_pool_get_rx_frame_size(rq->xsk.pool) + vi->hdr_len;
> > +
> > +       if (unlikely(len > bufsize)) {
> > +               pr_debug("%s: rx error: len %u exceeds truesize %u\n",
> > +                        vi->dev->name, len, bufsize);
> > +               DEV_STATS_INC(vi->dev, rx_length_errors);
> > +               xsk_buff_free(xdp);
> > +               return NULL;
> > +       }
> > +
> > +       xsk_buff_set_size(xdp, len);
> > +       xsk_buff_dma_sync_for_cpu(xdp);
> > +
> > +       return xdp;
> > +}
> > +
> > +static struct sk_buff *xdp_construct_skb(struct receive_queue *rq,
> > +                                        struct xdp_buff *xdp)
> > +{
> > +       unsigned int metasize = xdp->data - xdp->data_meta;
> > +       struct sk_buff *skb;
> > +       unsigned int size;
> > +
> > +       size = xdp->data_end - xdp->data_hard_start;
> > +       skb = napi_alloc_skb(&rq->napi, size);
> > +       if (unlikely(!skb)) {
> > +               xsk_buff_free(xdp);
> > +               return NULL;
> > +       }
> > +
> > +       skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
> > +
> > +       size = xdp->data_end - xdp->data_meta;
> > +       memcpy(__skb_put(skb, size), xdp->data_meta, size);
> > +
> > +       if (metasize) {
> > +               __skb_pull(skb, metasize);
> > +               skb_metadata_set(skb, metasize);
> > +       }
> > +
> > +       xsk_buff_free(xdp);
> > +
> > +       return skb;
> > +}
> > +
> > +static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
> > +                                                struct receive_queue *rq, struct xdp_buff *xdp,
> > +                                                unsigned int *xdp_xmit,
> > +                                                struct virtnet_rq_stats *stats)
> > +{
> > +       struct bpf_prog *prog;
> > +       u32 ret;
> > +
> > +       ret = XDP_PASS;
> > +       rcu_read_lock();
> > +       prog = rcu_dereference(rq->xdp_prog);
> > +       if (prog)
> > +               ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
> > +       rcu_read_unlock();
> > +
> > +       switch (ret) {
> > +       case XDP_PASS:
> > +               return xdp_construct_skb(rq, xdp);
> > +
> > +       case XDP_TX:
> > +       case XDP_REDIRECT:
> > +               return NULL;
> > +
> > +       default:
> > +               /* drop packet */
> > +               xsk_buff_free(xdp);
> > +               u64_stats_inc(&stats->drops);
> > +               return NULL;
> > +       }
> > +}
>
> Let's use a separate patch for this to decouple the new functions from
> the refactoring.
>
> Or even use a separate series for rx zero-copy.

This is for xsk. I don't get your point.

> > +
> > +static struct sk_buff *virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
> > +                                              void *buf, u32 len,
> > +                                              unsigned int *xdp_xmit,
> > +                                              struct virtnet_rq_stats *stats)
> > +{
> > +       struct net_device *dev = vi->dev;
> > +       struct sk_buff *skb = NULL;
> > +       struct xdp_buff *xdp;
> > +
> > +       len -= vi->hdr_len;
> > +
> > +       u64_stats_add(&stats->bytes, len);
> > +
> > +       xdp = buf_to_xdp(vi, rq, buf, len);
> > +       if (!xdp)
> > +               return NULL;
>
> Don't we need to check if XDP is enabled before those operations?
>
> > +
> > +       if (unlikely(len < ETH_HLEN)) {
> > +               pr_debug("%s: short packet %i\n", dev->name, len);
> > +               DEV_STATS_INC(dev, rx_length_errors);
> > +               xsk_buff_free(xdp);
> > +               return NULL;
> > +       }
> > +
> > +       if (!vi->mergeable_rx_bufs)
> > +               skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
> > +
> > +       return skb;
> > +}
> > +
> >  static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> >                                    struct xsk_buff_pool *pool, gfp_t gfp)
> >  {
> > @@ -2713,9 +2834,22 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
> >         void *buf;
> >         int i;
> >
> > -       if (!vi->big_packets || vi->mergeable_rx_bufs) {
> > -               void *ctx;
> > +       if (rq->xsk.pool) {
> > +               struct sk_buff *skb;
> > +
> > +               while (packets < budget) {
> > +                       buf = virtqueue_get_buf(rq->vq, &len);
> > +                       if (!buf)
> > +                               break;
> >
> > +                       skb = virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, &stats);
>
> The function name is confusing; for example, xsk might not even be
> enabled.

If rq->xsk.pool is set, xsk is enabled.

Thanks.

> > +                       if (skb)
> > +                               virtnet_receive_done(vi, rq, skb);
> > +
> > +                       packets++;
> > +               }
> > +       } else if (!vi->big_packets || vi->mergeable_rx_bufs) {
> > +               void *ctx;
> >                 while (packets < budget &&
> >                        (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
> >                         receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
> > --
> > 2.32.0.3.g01195cf9f
> >
>
> Thanks
>
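A side note on the exchange above: in this series, "xsk is enabled" on a
queue reduces to rq->xsk.pool being non-NULL, which is set when userspace
binds an AF_XDP buffer pool to that queue. The sketch below only
illustrates that assumption; virtnet_xsk_pool_enable() comes from an
earlier patch in the series, but the body shown here is a simplified
reconstruction, not the actual series code.

/* Hedged sketch, not the series code: rq->xsk.pool is assigned only once
 * an AF_XDP pool is successfully bound to the queue, so the RX path can
 * use the pointer itself as its "xsk enabled" test.
 */
static int virtnet_xsk_pool_enable(struct net_device *dev,
                                   struct xsk_buff_pool *pool, u16 qid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct receive_queue *rq;
        int err;

        if (qid >= vi->curr_queue_pairs)
                return -EINVAL;

        rq = &vi->rq[qid];

        /* DMA-map the pool for the device before exposing it to RX. */
        err = xsk_pool_dma_map(pool, &vi->vdev->dev, 0);
        if (err)
                return err;

        /* From this point on, virtnet_receive() takes the xsk branch. */
        rq->xsk.pool = pool;
        return 0;
}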
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4e5645d8bb7d..72c4d2f0c0ea 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -534,8 +534,10 @@ struct virtio_net_common_hdr {

 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 static void virtnet_xsk_completed(struct send_queue *sq, int num);
-static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
-                                  struct xsk_buff_pool *pool, gfp_t gfp);
+static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+                              struct net_device *dev,
+                              unsigned int *xdp_xmit,
+                              struct virtnet_rq_stats *stats);

 enum virtnet_xmit_type {
        VIRTNET_XMIT_TYPE_SKB,
@@ -1218,6 +1220,11 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)

        rq = &vi->rq[i];

+       if (rq->xsk.pool) {
+               xsk_buff_free((struct xdp_buff *)buf);
+               return;
+       }
+
        if (!vi->big_packets || vi->mergeable_rx_bufs)
                virtnet_rq_unmap(rq, buf, 0);

@@ -1308,6 +1315,120 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
        sg->length = len;
 }

+static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
+                                  struct receive_queue *rq, void *buf, u32 len)
+{
+       struct xdp_buff *xdp;
+       u32 bufsize;
+
+       xdp = (struct xdp_buff *)buf;
+
+       bufsize = xsk_pool_get_rx_frame_size(rq->xsk.pool) + vi->hdr_len;
+
+       if (unlikely(len > bufsize)) {
+               pr_debug("%s: rx error: len %u exceeds truesize %u\n",
+                        vi->dev->name, len, bufsize);
+               DEV_STATS_INC(vi->dev, rx_length_errors);
+               xsk_buff_free(xdp);
+               return NULL;
+       }
+
+       xsk_buff_set_size(xdp, len);
+       xsk_buff_dma_sync_for_cpu(xdp);
+
+       return xdp;
+}
+
+static struct sk_buff *xdp_construct_skb(struct receive_queue *rq,
+                                        struct xdp_buff *xdp)
+{
+       unsigned int metasize = xdp->data - xdp->data_meta;
+       struct sk_buff *skb;
+       unsigned int size;
+
+       size = xdp->data_end - xdp->data_hard_start;
+       skb = napi_alloc_skb(&rq->napi, size);
+       if (unlikely(!skb)) {
+               xsk_buff_free(xdp);
+               return NULL;
+       }
+
+       skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
+
+       size = xdp->data_end - xdp->data_meta;
+       memcpy(__skb_put(skb, size), xdp->data_meta, size);
+
+       if (metasize) {
+               __skb_pull(skb, metasize);
+               skb_metadata_set(skb, metasize);
+       }
+
+       xsk_buff_free(xdp);
+
+       return skb;
+}
+
+static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
+                                                struct receive_queue *rq, struct xdp_buff *xdp,
+                                                unsigned int *xdp_xmit,
+                                                struct virtnet_rq_stats *stats)
+{
+       struct bpf_prog *prog;
+       u32 ret;
+
+       ret = XDP_PASS;
+       rcu_read_lock();
+       prog = rcu_dereference(rq->xdp_prog);
+       if (prog)
+               ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+       rcu_read_unlock();
+
+       switch (ret) {
+       case XDP_PASS:
+               return xdp_construct_skb(rq, xdp);
+
+       case XDP_TX:
+       case XDP_REDIRECT:
+               return NULL;
+
+       default:
+               /* drop packet */
+               xsk_buff_free(xdp);
+               u64_stats_inc(&stats->drops);
+               return NULL;
+       }
+}
+
+static struct sk_buff *virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
+                                              void *buf, u32 len,
+                                              unsigned int *xdp_xmit,
+                                              struct virtnet_rq_stats *stats)
+{
+       struct net_device *dev = vi->dev;
+       struct sk_buff *skb = NULL;
+       struct xdp_buff *xdp;
+
+       len -= vi->hdr_len;
+
+       u64_stats_add(&stats->bytes, len);
+
+       xdp = buf_to_xdp(vi, rq, buf, len);
+       if (!xdp)
+               return NULL;
+
+       if (unlikely(len < ETH_HLEN)) {
+               pr_debug("%s: short packet %i\n", dev->name, len);
+               DEV_STATS_INC(dev, rx_length_errors);
+               xsk_buff_free(xdp);
+               return NULL;
+       }
+
+       if (!vi->mergeable_rx_bufs)
+               skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
+
+       return skb;
+}
+
 static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
                                    struct xsk_buff_pool *pool, gfp_t gfp)
 {
@@ -2713,9 +2834,22 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
        void *buf;
        int i;

-       if (!vi->big_packets || vi->mergeable_rx_bufs) {
-               void *ctx;
+       if (rq->xsk.pool) {
+               struct sk_buff *skb;
+
+               while (packets < budget) {
+                       buf = virtqueue_get_buf(rq->vq, &len);
+                       if (!buf)
+                               break;

+                       skb = virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, &stats);
+                       if (skb)
+                               virtnet_receive_done(vi, rq, skb);
+
+                       packets++;
+               }
+       } else if (!vi->big_packets || vi->mergeable_rx_bufs) {
+               void *ctx;
                while (packets < budget &&
                       (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
                        receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
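For context, a minimal userspace sketch of the consumer side of this
zero-copy RX path: an AF_XDP socket bound to a virtio-net queue drains its
RX ring and recycles the buffers through the fill ring. This is not part
of the patch; the helpers are libxdp's standard xsk API, while the batch
size and the surrounding socket/umem setup are assumptions.

/* Hedged userspace sketch (not from this patch): consume packets from the
 * RX ring of an AF_XDP socket and hand their buffers back via the fill
 * ring. Socket and umem creation are assumed to have been done elsewhere.
 */
#include <xdp/xsk.h>

#define BATCH 64

static void drain_rx(struct xsk_ring_cons *rx, struct xsk_ring_prod *fill,
                     void *umem_area)
{
        __u32 idx_rx = 0, idx_fq = 0;
        __u32 i, rcvd;

        rcvd = xsk_ring_cons__peek(rx, BATCH, &idx_rx);
        if (!rcvd)
                return;

        /* Reserve fill-ring slots so the kernel gets its buffers back. */
        if (xsk_ring_prod__reserve(fill, rcvd, &idx_fq) != rcvd) {
                xsk_ring_cons__cancel(rx, rcvd);
                return;
        }

        for (i = 0; i < rcvd; i++) {
                const struct xdp_desc *desc =
                        xsk_ring_cons__rx_desc(rx, idx_rx + i);
                void *pkt = xsk_umem__get_data(umem_area, desc->addr);

                /* The payload is pkt[0..desc->len), processed in place:
                 * no copy between kernel and userspace.
                 */
                (void)pkt;

                *xsk_ring_prod__fill_addr(fill, idx_fq + i) = desc->addr;
        }

        xsk_ring_prod__submit(fill, rcvd);
        xsk_ring_cons__release(rx, rcvd);
}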
virtnet_xdp_handler() is reused, but:

1. We need to copy data to create an skb for XDP_PASS.
2. We need to call xsk_buff_free() to release the buffer.
3. The handling of the xdp_buff is different.

If we pushed this logic into the existing receive handlers (merge and
small), we would have to maintain code scattered across merge and small
(and big). So I think it is a good choice for us to put the xsk code
into an independent function.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 142 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 138 insertions(+), 4 deletions(-)