[net-next,v2,02/13] virtio_ring: split: record extras for indirect buffers

Message ID 20241030082453.97310-3-xuanzhuo@linux.alibaba.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series virtio-net: support AF_XDP zero copy (tx)

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 5 this patch: 5
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 6 of 6 maintainers
netdev/build_clang success Errors and warnings before: 3 this patch: 3
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 4 this patch: 4
netdev/checkpatch warning WARNING: line length of 83 exceeds 80 columns WARNING: line length of 91 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 22 this patch: 22
netdev/source_inline success Was 0 now: 0

Commit Message

Xuan Zhuo Oct. 30, 2024, 8:24 a.m. UTC
The subsequent commit needs to know whether every indirect buffer is
premapped or not. So we need to introduce an extra struct for every
indirect buffer to record this info.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/virtio/virtio_ring.c | 112 ++++++++++++++++-------------------
 1 file changed, 52 insertions(+), 60 deletions(-)
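
At a glance, the patch carves the indirect descriptor table and a matching array of struct vring_desc_extra out of a single allocation, descriptors first and extras immediately after them. Condensed from alloc_indirect_split() in the diff below, for orientation:

	/* Condensed from alloc_indirect_split(): one allocation, vring_desc
	 * entries first, vring_desc_extra entries right behind them.
	 */
	size = sizeof(*desc) * total_sg + sizeof(*extra) * total_sg;

	desc = kmalloc(size, gfp);
	if (!desc)
		return NULL;

	extra = (struct vring_desc_extra *)&desc[total_sg];

	for (i = 0; i < total_sg; i++)
		extra[i].next = i + 1;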

Comments

Jason Wang Nov. 5, 2024, 3:42 a.m. UTC | #1
On Wed, Oct 30, 2024 at 4:25 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> The subsequent commit needs to know whether every indirect buffer is
> premapped or not. So we need to introduce an extra struct for every
> indirect buffer to record this info.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
>  drivers/virtio/virtio_ring.c | 112 ++++++++++++++++-------------------
>  1 file changed, 52 insertions(+), 60 deletions(-)

Do we have a performance impact for this patch?

>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index 97590c201aa2..dca093744fe1 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -69,7 +69,11 @@
>
>  struct vring_desc_state_split {
>         void *data;                     /* Data for callback. */
> -       struct vring_desc *indir_desc;  /* Indirect descriptor, if any. */
> +
> +       /* Indirect extra table and desc table, if any. These two will be
> +        * allocated together. So we won't stress more to the memory allocator.
> +        */
> +       struct vring_desc *indir_desc;

So it looks like we put a descriptor table after the extra table. Can
this lead to more crossing page mappings for the indirect descriptors?

If yes, it seems expensive so we probably need to make the descriptor
table come first.

>  };
>
>  struct vring_desc_state_packed {
> @@ -440,38 +444,20 @@ static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
>   * Split ring specific functions - *_split().
>   */
>
> -static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
> -                                          const struct vring_desc *desc)
> -{
> -       u16 flags;
> -
> -       if (!vring_need_unmap_buffer(vq))
> -               return;
> -
> -       flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
> -
> -       dma_unmap_page(vring_dma_dev(vq),
> -                      virtio64_to_cpu(vq->vq.vdev, desc->addr),
> -                      virtio32_to_cpu(vq->vq.vdev, desc->len),
> -                      (flags & VRING_DESC_F_WRITE) ?
> -                      DMA_FROM_DEVICE : DMA_TO_DEVICE);
> -}
> -
>  static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> -                                         unsigned int i)
> +                                         struct vring_desc_extra *extra)
>  {
> -       struct vring_desc_extra *extra = vq->split.desc_extra;
>         u16 flags;
>
> -       flags = extra[i].flags;
> +       flags = extra->flags;
>
>         if (flags & VRING_DESC_F_INDIRECT) {
>                 if (!vq->use_dma_api)
>                         goto out;
>
>                 dma_unmap_single(vring_dma_dev(vq),
> -                                extra[i].addr,
> -                                extra[i].len,
> +                                extra->addr,
> +                                extra->len,
>                                  (flags & VRING_DESC_F_WRITE) ?
>                                  DMA_FROM_DEVICE : DMA_TO_DEVICE);
>         } else {
> @@ -479,22 +465,23 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
>                         goto out;
>
>                 dma_unmap_page(vring_dma_dev(vq),
> -                              extra[i].addr,
> -                              extra[i].len,
> +                              extra->addr,
> +                              extra->len,
>                                (flags & VRING_DESC_F_WRITE) ?
>                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
>         }
>
>  out:
> -       return extra[i].next;
> +       return extra->next;
>  }
>
>  static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
>                                                unsigned int total_sg,
>                                                gfp_t gfp)
>  {
> +       struct vring_desc_extra *extra;
>         struct vring_desc *desc;
> -       unsigned int i;
> +       unsigned int i, size;
>
>         /*
>          * We require lowmem mappings for the descriptors because
> @@ -503,40 +490,41 @@ static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
>          */
>         gfp &= ~__GFP_HIGHMEM;
>
> -       desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
> +       size = sizeof(*desc) * total_sg + sizeof(*extra) * total_sg;
> +
> +       desc = kmalloc(size, gfp);
>         if (!desc)
>                 return NULL;
>
> +       extra = (struct vring_desc_extra *)&desc[total_sg];
> +
>         for (i = 0; i < total_sg; i++)
> -               desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
> +               extra[i].next = i + 1;
> +
>         return desc;
>  }
>
>  static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
>                                                     struct vring_desc *desc,
> +                                                   struct vring_desc_extra *extra,
>                                                     unsigned int i,
>                                                     dma_addr_t addr,
>                                                     unsigned int len,
> -                                                   u16 flags,
> -                                                   bool indirect)
> +                                                   u16 flags)
>  {
> -       struct vring_virtqueue *vring = to_vvq(vq);
> -       struct vring_desc_extra *extra = vring->split.desc_extra;
>         u16 next;
>
>         desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
>         desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
>         desc[i].len = cpu_to_virtio32(vq->vdev, len);
>
> -       if (!indirect) {
> -               next = extra[i].next;
> -               desc[i].next = cpu_to_virtio16(vq->vdev, next);
> +       extra[i].addr = addr;
> +       extra[i].len = len;
> +       extra[i].flags = flags;
> +
> +       next = extra[i].next;
>
> -               extra[i].addr = addr;
> -               extra[i].len = len;
> -               extra[i].flags = flags;
> -       } else
> -               next = virtio16_to_cpu(vq->vdev, desc[i].next);
> +       desc[i].next = cpu_to_virtio16(vq->vdev, next);
>
>         return next;
>  }
> @@ -551,6 +539,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>                                       gfp_t gfp)
>  {
>         struct vring_virtqueue *vq = to_vvq(_vq);
> +       struct vring_desc_extra *extra;
>         struct scatterlist *sg;
>         struct vring_desc *desc;
>         unsigned int i, n, avail, descs_used, prev, err_idx;
> @@ -586,9 +575,11 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>                 /* Set up rest to use this indirect table. */
>                 i = 0;
>                 descs_used = 1;
> +               extra = (struct vring_desc_extra *)&desc[total_sg];
>         } else {
>                 indirect = false;
>                 desc = vq->split.vring.desc;
> +               extra = vq->split.desc_extra;
>                 i = head;
>                 descs_used = total_sg;
>         }
> @@ -618,9 +609,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>                         /* Note that we trust indirect descriptor
>                          * table since it use stream DMA mapping.
>                          */
> -                       i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
> -                                                    VRING_DESC_F_NEXT,
> -                                                    indirect);
> +                       i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, sg->length,
> +                                                    VRING_DESC_F_NEXT);
>                 }
>         }
>         for (; n < (out_sgs + in_sgs); n++) {
> @@ -634,11 +624,10 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>                         /* Note that we trust indirect descriptor
>                          * table since it use stream DMA mapping.
>                          */
> -                       i = virtqueue_add_desc_split(_vq, desc, i, addr,
> +                       i = virtqueue_add_desc_split(_vq, desc, extra, i, addr,
>                                                      sg->length,
>                                                      VRING_DESC_F_NEXT |
> -                                                    VRING_DESC_F_WRITE,
> -                                                    indirect);
> +                                                    VRING_DESC_F_WRITE);
>                 }
>         }
>         /* Last one doesn't continue. */
> @@ -660,10 +649,10 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>                 }
>
>                 virtqueue_add_desc_split(_vq, vq->split.vring.desc,
> +                                        vq->split.desc_extra,
>                                          head, addr,
>                                          total_sg * sizeof(struct vring_desc),
> -                                        VRING_DESC_F_INDIRECT,
> -                                        false);
> +                                        VRING_DESC_F_INDIRECT);
>         }
>
>         /* We're using some buffers from the free list. */
> @@ -716,11 +705,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
>         for (n = 0; n < total_sg; n++) {
>                 if (i == err_idx)
>                         break;
> -               if (indirect) {
> -                       vring_unmap_one_split_indirect(vq, &desc[i]);
> -                       i = virtio16_to_cpu(_vq->vdev, desc[i].next);
> -               } else
> -                       i = vring_unmap_one_split(vq, i);
> +
> +               i = vring_unmap_one_split(vq, &extra[i]);
>         }
>
>  free_indirect:
> @@ -765,22 +751,25 @@ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
>  static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
>                              void **ctx)
>  {
> +       struct vring_desc_extra *extra;
>         unsigned int i, j;
>         __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
>
>         /* Clear data ptr. */
>         vq->split.desc_state[head].data = NULL;
>
> +       extra = vq->split.desc_extra;
> +
>         /* Put back on free list: unmap first-level descriptors and find end */
>         i = head;
>
>         while (vq->split.vring.desc[i].flags & nextflag) {
> -               vring_unmap_one_split(vq, i);
> +               vring_unmap_one_split(vq, &extra[i]);

Not sure if I've asked this before. But this part seems to deserve an
independent fix for -stable.

>                 i = vq->split.desc_extra[i].next;
>                 vq->vq.num_free++;
>         }
>
> -       vring_unmap_one_split(vq, i);
> +       vring_unmap_one_split(vq, &extra[i]);
>         vq->split.desc_extra[i].next = vq->free_head;
>         vq->free_head = head;
>
> @@ -790,21 +779,24 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
>         if (vq->indirect) {
>                 struct vring_desc *indir_desc =
>                                 vq->split.desc_state[head].indir_desc;
> -               u32 len;
> +               u32 len, num;
>
>                 /* Free the indirect table, if any, now that it's unmapped. */
>                 if (!indir_desc)
>                         return;
> -
>                 len = vq->split.desc_extra[head].len;
>
>                 BUG_ON(!(vq->split.desc_extra[head].flags &
>                                 VRING_DESC_F_INDIRECT));
>                 BUG_ON(len == 0 || len % sizeof(struct vring_desc));
>
> +               num = len / sizeof(struct vring_desc);
> +
> +               extra = (struct vring_desc_extra *)&indir_desc[num];
> +
>                 if (vring_need_unmap_buffer(vq)) {
> -                       for (j = 0; j < len / sizeof(struct vring_desc); j++)
> -                               vring_unmap_one_split_indirect(vq, &indir_desc[j]);
> +                       for (j = 0; j < num; j++)
> +                               vring_unmap_one_split(vq, &extra[j]);
>                 }
>
>                 kfree(indir_desc);
> --
> 2.32.0.3.g01195cf9f
>

Thanks
Xuan Zhuo Nov. 5, 2024, 6:51 a.m. UTC | #2
On Tue, 5 Nov 2024 11:42:09 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Wed, Oct 30, 2024 at 4:25 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > The subsequent commit needs to know whether every indirect buffer is
> > premapped or not. So we need to introduce an extra struct for every
> > indirect buffer to record this info.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >  drivers/virtio/virtio_ring.c | 112 ++++++++++++++++-------------------
> >  1 file changed, 52 insertions(+), 60 deletions(-)
>
> Do we have a performance impact for this patch?
>
> >
> > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> > index 97590c201aa2..dca093744fe1 100644
> > --- a/drivers/virtio/virtio_ring.c
> > +++ b/drivers/virtio/virtio_ring.c
> > @@ -69,7 +69,11 @@
> >
> >  struct vring_desc_state_split {
> >         void *data;                     /* Data for callback. */
> > -       struct vring_desc *indir_desc;  /* Indirect descriptor, if any. */
> > +
> > +       /* Indirect extra table and desc table, if any. These two will be
> > +        * allocated together. So we won't stress more to the memory allocator.
> > +        */
> > +       struct vring_desc *indir_desc;
>
> So it looks like we put a descriptor table after the extra table. Can
> this lead to more crossing page mappings for the indirect descriptors?
>
> If yes, it seems expensive so we probably need to make the descriptor
> table come first.

No, the descriptors come before the extra table.
So there is no performance impact.
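
That ordering is what detach_buf_split() in this patch relies on to find the extra array again, condensed here:

	/* Condensed from detach_buf_split(): the extras start where the
	 * num descriptors end, so no separate pointer has to be stored.
	 */
	len = vq->split.desc_extra[head].len;
	num = len / sizeof(struct vring_desc);

	extra = (struct vring_desc_extra *)&indir_desc[num];

	if (vring_need_unmap_buffer(vq)) {
		for (j = 0; j < num; j++)
			vring_unmap_one_split(vq, &extra[j]);
	}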


>
> >  };
> >
> >  struct vring_desc_state_packed {
> > @@ -440,38 +444,20 @@ static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
> >   * Split ring specific functions - *_split().
> >   */
> >
> > -static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
> > -                                          const struct vring_desc *desc)
> > -{
> > -       u16 flags;
> > -
> > -       if (!vring_need_unmap_buffer(vq))
> > -               return;
> > -
> > -       flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
> > -
> > -       dma_unmap_page(vring_dma_dev(vq),
> > -                      virtio64_to_cpu(vq->vq.vdev, desc->addr),
> > -                      virtio32_to_cpu(vq->vq.vdev, desc->len),
> > -                      (flags & VRING_DESC_F_WRITE) ?
> > -                      DMA_FROM_DEVICE : DMA_TO_DEVICE);
> > -}
> > -
> >  static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> > -                                         unsigned int i)
> > +                                         struct vring_desc_extra *extra)
> >  {
> > -       struct vring_desc_extra *extra = vq->split.desc_extra;
> >         u16 flags;
> >
> > -       flags = extra[i].flags;
> > +       flags = extra->flags;
> >
> >         if (flags & VRING_DESC_F_INDIRECT) {
> >                 if (!vq->use_dma_api)
> >                         goto out;
> >
> >                 dma_unmap_single(vring_dma_dev(vq),
> > -                                extra[i].addr,
> > -                                extra[i].len,
> > +                                extra->addr,
> > +                                extra->len,
> >                                  (flags & VRING_DESC_F_WRITE) ?
> >                                  DMA_FROM_DEVICE : DMA_TO_DEVICE);
> >         } else {
> > @@ -479,22 +465,23 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> >                         goto out;
> >
> >                 dma_unmap_page(vring_dma_dev(vq),
> > -                              extra[i].addr,
> > -                              extra[i].len,
> > +                              extra->addr,
> > +                              extra->len,
> >                                (flags & VRING_DESC_F_WRITE) ?
> >                                DMA_FROM_DEVICE : DMA_TO_DEVICE);
> >         }
> >
> >  out:
> > -       return extra[i].next;
> > +       return extra->next;
> >  }
> >
> >  static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
> >                                                unsigned int total_sg,
> >                                                gfp_t gfp)
> >  {
> > +       struct vring_desc_extra *extra;
> >         struct vring_desc *desc;
> > -       unsigned int i;
> > +       unsigned int i, size;
> >
> >         /*
> >          * We require lowmem mappings for the descriptors because
> > @@ -503,40 +490,41 @@ static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
> >          */
> >         gfp &= ~__GFP_HIGHMEM;
> >
> > -       desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
> > +       size = sizeof(*desc) * total_sg + sizeof(*extra) * total_sg;
> > +
> > +       desc = kmalloc(size, gfp);
> >         if (!desc)
> >                 return NULL;
> >
> > +       extra = (struct vring_desc_extra *)&desc[total_sg];
> > +
> >         for (i = 0; i < total_sg; i++)
> > -               desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
> > +               extra[i].next = i + 1;
> > +
> >         return desc;
> >  }
> >
> >  static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
> >                                                     struct vring_desc *desc,
> > +                                                   struct vring_desc_extra *extra,
> >                                                     unsigned int i,
> >                                                     dma_addr_t addr,
> >                                                     unsigned int len,
> > -                                                   u16 flags,
> > -                                                   bool indirect)
> > +                                                   u16 flags)
> >  {
> > -       struct vring_virtqueue *vring = to_vvq(vq);
> > -       struct vring_desc_extra *extra = vring->split.desc_extra;
> >         u16 next;
> >
> >         desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
> >         desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
> >         desc[i].len = cpu_to_virtio32(vq->vdev, len);
> >
> > -       if (!indirect) {
> > -               next = extra[i].next;
> > -               desc[i].next = cpu_to_virtio16(vq->vdev, next);
> > +       extra[i].addr = addr;
> > +       extra[i].len = len;
> > +       extra[i].flags = flags;
> > +
> > +       next = extra[i].next;
> >
> > -               extra[i].addr = addr;
> > -               extra[i].len = len;
> > -               extra[i].flags = flags;
> > -       } else
> > -               next = virtio16_to_cpu(vq->vdev, desc[i].next);
> > +       desc[i].next = cpu_to_virtio16(vq->vdev, next);
> >
> >         return next;
> >  }
> > @@ -551,6 +539,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> >                                       gfp_t gfp)
> >  {
> >         struct vring_virtqueue *vq = to_vvq(_vq);
> > +       struct vring_desc_extra *extra;
> >         struct scatterlist *sg;
> >         struct vring_desc *desc;
> >         unsigned int i, n, avail, descs_used, prev, err_idx;
> > @@ -586,9 +575,11 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> >                 /* Set up rest to use this indirect table. */
> >                 i = 0;
> >                 descs_used = 1;
> > +               extra = (struct vring_desc_extra *)&desc[total_sg];
> >         } else {
> >                 indirect = false;
> >                 desc = vq->split.vring.desc;
> > +               extra = vq->split.desc_extra;
> >                 i = head;
> >                 descs_used = total_sg;
> >         }
> > @@ -618,9 +609,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> >                         /* Note that we trust indirect descriptor
> >                          * table since it use stream DMA mapping.
> >                          */
> > -                       i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
> > -                                                    VRING_DESC_F_NEXT,
> > -                                                    indirect);
> > +                       i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, sg->length,
> > +                                                    VRING_DESC_F_NEXT);
> >                 }
> >         }
> >         for (; n < (out_sgs + in_sgs); n++) {
> > @@ -634,11 +624,10 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> >                         /* Note that we trust indirect descriptor
> >                          * table since it use stream DMA mapping.
> >                          */
> > -                       i = virtqueue_add_desc_split(_vq, desc, i, addr,
> > +                       i = virtqueue_add_desc_split(_vq, desc, extra, i, addr,
> >                                                      sg->length,
> >                                                      VRING_DESC_F_NEXT |
> > -                                                    VRING_DESC_F_WRITE,
> > -                                                    indirect);
> > +                                                    VRING_DESC_F_WRITE);
> >                 }
> >         }
> >         /* Last one doesn't continue. */
> > @@ -660,10 +649,10 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> >                 }
> >
> >                 virtqueue_add_desc_split(_vq, vq->split.vring.desc,
> > +                                        vq->split.desc_extra,
> >                                          head, addr,
> >                                          total_sg * sizeof(struct vring_desc),
> > -                                        VRING_DESC_F_INDIRECT,
> > -                                        false);
> > +                                        VRING_DESC_F_INDIRECT);
> >         }
> >
> >         /* We're using some buffers from the free list. */
> > @@ -716,11 +705,8 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
> >         for (n = 0; n < total_sg; n++) {
> >                 if (i == err_idx)
> >                         break;
> > -               if (indirect) {
> > -                       vring_unmap_one_split_indirect(vq, &desc[i]);
> > -                       i = virtio16_to_cpu(_vq->vdev, desc[i].next);
> > -               } else
> > -                       i = vring_unmap_one_split(vq, i);
> > +
> > +               i = vring_unmap_one_split(vq, &extra[i]);
> >         }
> >
> >  free_indirect:
> > @@ -765,22 +751,25 @@ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
> >  static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
> >                              void **ctx)
> >  {
> > +       struct vring_desc_extra *extra;
> >         unsigned int i, j;
> >         __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
> >
> >         /* Clear data ptr. */
> >         vq->split.desc_state[head].data = NULL;
> >
> > +       extra = vq->split.desc_extra;
> > +
> >         /* Put back on free list: unmap first-level descriptors and find end */
> >         i = head;
> >
> >         while (vq->split.vring.desc[i].flags & nextflag) {
> > -               vring_unmap_one_split(vq, i);
> > +               vring_unmap_one_split(vq, &extra[i]);
>
> Not sure if I've asked this before. But this part seems to deserve an
> independent fix for -stable.

What fix?

Thanks.

>
> >                 i = vq->split.desc_extra[i].next;
> >                 vq->vq.num_free++;
> >         }
> >
> > -       vring_unmap_one_split(vq, i);
> > +       vring_unmap_one_split(vq, &extra[i]);
> >         vq->split.desc_extra[i].next = vq->free_head;
> >         vq->free_head = head;
> >
> > @@ -790,21 +779,24 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
> >         if (vq->indirect) {
> >                 struct vring_desc *indir_desc =
> >                                 vq->split.desc_state[head].indir_desc;
> > -               u32 len;
> > +               u32 len, num;
> >
> >                 /* Free the indirect table, if any, now that it's unmapped. */
> >                 if (!indir_desc)
> >                         return;
> > -
> >                 len = vq->split.desc_extra[head].len;
> >
> >                 BUG_ON(!(vq->split.desc_extra[head].flags &
> >                                 VRING_DESC_F_INDIRECT));
> >                 BUG_ON(len == 0 || len % sizeof(struct vring_desc));
> >
> > +               num = len / sizeof(struct vring_desc);
> > +
> > +               extra = (struct vring_desc_extra *)&indir_desc[num];
> > +
> >                 if (vring_need_unmap_buffer(vq)) {
> > -                       for (j = 0; j < len / sizeof(struct vring_desc); j++)
> > -                               vring_unmap_one_split_indirect(vq, &indir_desc[j]);
> > +                       for (j = 0; j < num; j++)
> > +                               vring_unmap_one_split(vq, &extra[j]);
> >                 }
> >
> >                 kfree(indir_desc);
> > --
> > 2.32.0.3.g01195cf9f
> >
>
> Thanks
>
Jason Wang Nov. 6, 2024, 1:44 a.m. UTC | #3
On Tue, Nov 5, 2024 at 2:53 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Tue, 5 Nov 2024 11:42:09 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Wed, Oct 30, 2024 at 4:25 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > The subsequent commit needs to know whether every indirect buffer is
> > > premapped or not. So we need to introduce an extra struct for every
> > > indirect buffer to record this info.
> > >
> > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > ---
> > >  drivers/virtio/virtio_ring.c | 112 ++++++++++++++++-------------------
> > >  1 file changed, 52 insertions(+), 60 deletions(-)
> >
> > Do we have a performance impact for this patch?
> >
> > >
> > > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> > > index 97590c201aa2..dca093744fe1 100644
> > > --- a/drivers/virtio/virtio_ring.c
> > > +++ b/drivers/virtio/virtio_ring.c
> > > @@ -69,7 +69,11 @@
> > >
> > >  struct vring_desc_state_split {
> > >         void *data;                     /* Data for callback. */
> > > -       struct vring_desc *indir_desc;  /* Indirect descriptor, if any. */
> > > +
> > > +       /* Indirect extra table and desc table, if any. These two will be
> > > +        * allocated together. So we won't stress more to the memory allocator.
> > > +        */
> > > +       struct vring_desc *indir_desc;
> >
> > So it looks like we put a descriptor table after the extra table. Can
> > this lead to more crossing page mappings for the indirect descriptors?
> >
> > If yes, it seems expensive so we probably need to make the descriptor
> > table come first.
>
> No, the descriptors come before the extra table.

Well, then you need to tweak the above comment; it says

"Indirect extra table and desc table".

> So there is no performance impact.
>
>
> >
> > >  };
> > >

[...]

> > >         while (vq->split.vring.desc[i].flags & nextflag) {
> > > -               vring_unmap_one_split(vq, i);
> > > +               vring_unmap_one_split(vq, &extra[i]);
> >
> > Not sure if I've asked this before. But this part seems to deserve an
> > independent fix for -stable.
>
> What fix?

I meant for hardening we need to check the flags stored in the extra
instead of the descriptor itself as it could be mangled by the device.

Thanks
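
Roughly, the hardening being suggested would have the free-list walk in detach_buf_split() trust only the driver-private extra array rather than the device-writable ring. A sketch of the idea (illustrative only; the actual follow-up patch is not part of this thread):

	/* Illustrative sketch, not the actual fix: walk the chain via the
	 * flags/next recorded in desc_extra, which the device cannot write,
	 * instead of re-reading vring.desc[i].flags from the ring.
	 */
	extra = vq->split.desc_extra;
	i = head;

	while (extra[i].flags & VRING_DESC_F_NEXT) {
		vring_unmap_one_split(vq, &extra[i]);
		i = extra[i].next;
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, &extra[i]);
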
Xuan Zhuo Nov. 6, 2024, 5:58 a.m. UTC | #4
On Wed, 6 Nov 2024 09:44:39 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Tue, Nov 5, 2024 at 2:53 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Tue, 5 Nov 2024 11:42:09 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Wed, Oct 30, 2024 at 4:25 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > The subsequent commit needs to know whether every indirect buffer is
> > > > premapped or not. So we need to introduce an extra struct for every
> > > > indirect buffer to record this info.
> > > >
> > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > ---
> > > >  drivers/virtio/virtio_ring.c | 112 ++++++++++++++++-------------------
> > > >  1 file changed, 52 insertions(+), 60 deletions(-)
> > >
> > > Do we have a performance impact for this patch?
> > >
> > > >
> > > > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> > > > index 97590c201aa2..dca093744fe1 100644
> > > > --- a/drivers/virtio/virtio_ring.c
> > > > +++ b/drivers/virtio/virtio_ring.c
> > > > @@ -69,7 +69,11 @@
> > > >
> > > >  struct vring_desc_state_split {
> > > >         void *data;                     /* Data for callback. */
> > > > -       struct vring_desc *indir_desc;  /* Indirect descriptor, if any. */
> > > > +
> > > > +       /* Indirect extra table and desc table, if any. These two will be
> > > > +        * allocated together. So we won't stress more to the memory allocator.
> > > > +        */
> > > > +       struct vring_desc *indir_desc;
> > >
> > > So it looks like we put a descriptor table after the extra table. Can
> > > this lead to more crossing page mappings for the indirect descriptors?
> > >
> > > If yes, it seems expensive so we probably need to make the descriptor
> > > table come first.
> >
> > No, the descriptors come before the extra table.
>
> Well, then you need to tweak the above comment; it says
>
> "Indirect extra table and desc table".
>
> > So there is no performance impact.
> >
> >
> > >
> > > >  };
> > > >
>
> [...]
>
> > > >         while (vq->split.vring.desc[i].flags & nextflag) {
> > > > -               vring_unmap_one_split(vq, i);
> > > > +               vring_unmap_one_split(vq, &extra[i]);
> > >
> > > Not sure if I've asked this before. But this part seems to deserve an
> > > independent fix for -stable.
> >
> > What fix?
>
> I meant for hardening we need to check the flags stored in the extra
> instead of the descriptor itself as it could be mangled by the device.

I see, we can do it in the future.

Thanks.


>
> Thanks
>
Michael S. Tsirkin Nov. 6, 2024, 7:42 a.m. UTC | #5
On Wed, Nov 06, 2024 at 09:44:39AM +0800, Jason Wang wrote:
> > > >         while (vq->split.vring.desc[i].flags & nextflag) {
> > > > -               vring_unmap_one_split(vq, i);
> > > > +               vring_unmap_one_split(vq, &extra[i]);
> > >
> > > Not sure if I've asked this before. But this part seems to deserve an
> > > independent fix for -stable.
> >
> > What fix?
> 
> I meant for hardening we need to check the flags stored in the extra
> instead of the descriptor itself as it could be mangled by the device.
> 
> Thanks

Good point. Jason, want to cook up a patch?
Jason Wang Nov. 11, 2024, 1:27 a.m. UTC | #6
On Wed, Nov 6, 2024 at 3:42 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Wed, Nov 06, 2024 at 09:44:39AM +0800, Jason Wang wrote:
> > > > >         while (vq->split.vring.desc[i].flags & nextflag) {
> > > > > -               vring_unmap_one_split(vq, i);
> > > > > +               vring_unmap_one_split(vq, &extra[i]);
> > > >
> > > > Not sure if I've asked this before. But this part seems to deserve an
> > > > independent fix for -stable.
> > >
> > > What fix?
> >
> > I meant for hardening we need to check the flags stored in the extra
> > instead of the descriptor itself as it could be mangled by the device.
> >
> > Thanks
>
> Good point. Jason, want to cook up a patch?

Will do.

Thanks

>
> --
> MST
>

Patch

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 97590c201aa2..dca093744fe1 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -69,7 +69,11 @@ 
 
 struct vring_desc_state_split {
 	void *data;			/* Data for callback. */
-	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
+
+	/* Indirect extra table and desc table, if any. These two will be
+	 * allocated together. So we won't stress more to the memory allocator.
+	 */
+	struct vring_desc *indir_desc;
 };
 
 struct vring_desc_state_packed {
@@ -440,38 +444,20 @@  static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
  * Split ring specific functions - *_split().
  */
 
-static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
-					   const struct vring_desc *desc)
-{
-	u16 flags;
-
-	if (!vring_need_unmap_buffer(vq))
-		return;
-
-	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
-
-	dma_unmap_page(vring_dma_dev(vq),
-		       virtio64_to_cpu(vq->vq.vdev, desc->addr),
-		       virtio32_to_cpu(vq->vq.vdev, desc->len),
-		       (flags & VRING_DESC_F_WRITE) ?
-		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
-}
-
 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
-					  unsigned int i)
+					  struct vring_desc_extra *extra)
 {
-	struct vring_desc_extra *extra = vq->split.desc_extra;
 	u16 flags;
 
-	flags = extra[i].flags;
+	flags = extra->flags;
 
 	if (flags & VRING_DESC_F_INDIRECT) {
 		if (!vq->use_dma_api)
 			goto out;
 
 		dma_unmap_single(vring_dma_dev(vq),
-				 extra[i].addr,
-				 extra[i].len,
+				 extra->addr,
+				 extra->len,
 				 (flags & VRING_DESC_F_WRITE) ?
 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
 	} else {
@@ -479,22 +465,23 @@  static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
 			goto out;
 
 		dma_unmap_page(vring_dma_dev(vq),
-			       extra[i].addr,
-			       extra[i].len,
+			       extra->addr,
+			       extra->len,
 			       (flags & VRING_DESC_F_WRITE) ?
 			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
 	}
 
 out:
-	return extra[i].next;
+	return extra->next;
 }
 
 static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
 					       unsigned int total_sg,
 					       gfp_t gfp)
 {
+	struct vring_desc_extra *extra;
 	struct vring_desc *desc;
-	unsigned int i;
+	unsigned int i, size;
 
 	/*
 	 * We require lowmem mappings for the descriptors because
@@ -503,40 +490,41 @@  static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
 	 */
 	gfp &= ~__GFP_HIGHMEM;
 
-	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
+	size = sizeof(*desc) * total_sg + sizeof(*extra) * total_sg;
+
+	desc = kmalloc(size, gfp);
 	if (!desc)
 		return NULL;
 
+	extra = (struct vring_desc_extra *)&desc[total_sg];
+
 	for (i = 0; i < total_sg; i++)
-		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
+		extra[i].next = i + 1;
+
 	return desc;
 }
 
 static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
 						    struct vring_desc *desc,
+						    struct vring_desc_extra *extra,
 						    unsigned int i,
 						    dma_addr_t addr,
 						    unsigned int len,
-						    u16 flags,
-						    bool indirect)
+						    u16 flags)
 {
-	struct vring_virtqueue *vring = to_vvq(vq);
-	struct vring_desc_extra *extra = vring->split.desc_extra;
 	u16 next;
 
 	desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
 	desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
 	desc[i].len = cpu_to_virtio32(vq->vdev, len);
 
-	if (!indirect) {
-		next = extra[i].next;
-		desc[i].next = cpu_to_virtio16(vq->vdev, next);
+	extra[i].addr = addr;
+	extra[i].len = len;
+	extra[i].flags = flags;
+
+	next = extra[i].next;
 
-		extra[i].addr = addr;
-		extra[i].len = len;
-		extra[i].flags = flags;
-	} else
-		next = virtio16_to_cpu(vq->vdev, desc[i].next);
+	desc[i].next = cpu_to_virtio16(vq->vdev, next);
 
 	return next;
 }
@@ -551,6 +539,7 @@  static inline int virtqueue_add_split(struct virtqueue *_vq,
 				      gfp_t gfp)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
+	struct vring_desc_extra *extra;
 	struct scatterlist *sg;
 	struct vring_desc *desc;
 	unsigned int i, n, avail, descs_used, prev, err_idx;
@@ -586,9 +575,11 @@  static inline int virtqueue_add_split(struct virtqueue *_vq,
 		/* Set up rest to use this indirect table. */
 		i = 0;
 		descs_used = 1;
+		extra = (struct vring_desc_extra *)&desc[total_sg];
 	} else {
 		indirect = false;
 		desc = vq->split.vring.desc;
+		extra = vq->split.desc_extra;
 		i = head;
 		descs_used = total_sg;
 	}
@@ -618,9 +609,8 @@  static inline int virtqueue_add_split(struct virtqueue *_vq,
 			/* Note that we trust indirect descriptor
 			 * table since it use stream DMA mapping.
 			 */
-			i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
-						     VRING_DESC_F_NEXT,
-						     indirect);
+			i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, sg->length,
+						     VRING_DESC_F_NEXT);
 		}
 	}
 	for (; n < (out_sgs + in_sgs); n++) {
@@ -634,11 +624,10 @@  static inline int virtqueue_add_split(struct virtqueue *_vq,
 			/* Note that we trust indirect descriptor
 			 * table since it use stream DMA mapping.
 			 */
-			i = virtqueue_add_desc_split(_vq, desc, i, addr,
+			i = virtqueue_add_desc_split(_vq, desc, extra, i, addr,
 						     sg->length,
 						     VRING_DESC_F_NEXT |
-						     VRING_DESC_F_WRITE,
-						     indirect);
+						     VRING_DESC_F_WRITE);
 		}
 	}
 	/* Last one doesn't continue. */
@@ -660,10 +649,10 @@  static inline int virtqueue_add_split(struct virtqueue *_vq,
 		}
 
 		virtqueue_add_desc_split(_vq, vq->split.vring.desc,
+					 vq->split.desc_extra,
 					 head, addr,
 					 total_sg * sizeof(struct vring_desc),
-					 VRING_DESC_F_INDIRECT,
-					 false);
+					 VRING_DESC_F_INDIRECT);
 	}
 
 	/* We're using some buffers from the free list. */
@@ -716,11 +705,8 @@  static inline int virtqueue_add_split(struct virtqueue *_vq,
 	for (n = 0; n < total_sg; n++) {
 		if (i == err_idx)
 			break;
-		if (indirect) {
-			vring_unmap_one_split_indirect(vq, &desc[i]);
-			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
-		} else
-			i = vring_unmap_one_split(vq, i);
+
+		i = vring_unmap_one_split(vq, &extra[i]);
 	}
 
 free_indirect:
@@ -765,22 +751,25 @@  static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
 static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
 			     void **ctx)
 {
+	struct vring_desc_extra *extra;
 	unsigned int i, j;
 	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
 
 	/* Clear data ptr. */
 	vq->split.desc_state[head].data = NULL;
 
+	extra = vq->split.desc_extra;
+
 	/* Put back on free list: unmap first-level descriptors and find end */
 	i = head;
 
 	while (vq->split.vring.desc[i].flags & nextflag) {
-		vring_unmap_one_split(vq, i);
+		vring_unmap_one_split(vq, &extra[i]);
 		i = vq->split.desc_extra[i].next;
 		vq->vq.num_free++;
 	}
 
-	vring_unmap_one_split(vq, i);
+	vring_unmap_one_split(vq, &extra[i]);
 	vq->split.desc_extra[i].next = vq->free_head;
 	vq->free_head = head;
 
@@ -790,21 +779,24 @@  static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
 	if (vq->indirect) {
 		struct vring_desc *indir_desc =
 				vq->split.desc_state[head].indir_desc;
-		u32 len;
+		u32 len, num;
 
 		/* Free the indirect table, if any, now that it's unmapped. */
 		if (!indir_desc)
 			return;
-
 		len = vq->split.desc_extra[head].len;
 
 		BUG_ON(!(vq->split.desc_extra[head].flags &
 				VRING_DESC_F_INDIRECT));
 		BUG_ON(len == 0 || len % sizeof(struct vring_desc));
 
+		num = len / sizeof(struct vring_desc);
+
+		extra = (struct vring_desc_extra *)&indir_desc[num];
+
 		if (vring_need_unmap_buffer(vq)) {
-			for (j = 0; j < len / sizeof(struct vring_desc); j++)
-				vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+			for (j = 0; j < num; j++)
+				vring_unmap_one_split(vq, &extra[j]);
 		}
 
 		kfree(indir_desc);