| Message ID | 20230829081142.3619-2-urezki@gmail.com (mailing list archive) |
|---|---|
| State | New |
| Series | Mitigate a vmap lock contention v2 |
On 08/29/23 at 10:11am, Uladzislau Rezki (Sony) wrote:
> Currently the __alloc_vmap_area() function contains open-coded
> logic that finds and adjusts a VA based on an allocation request.
>
> Introduce a va_alloc() helper that only adjusts a found VA. It
> will be used later in at least two places.
>
> There is no functional change as a result of this patch.
>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> ---
>  mm/vmalloc.c | 41 ++++++++++++++++++++++++++++-------------
>  1 file changed, 28 insertions(+), 13 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 93cf99aba335..00afc1ee4756 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1481,6 +1481,32 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
>  	return 0;
>  }
>
> +static unsigned long
> +va_alloc(struct vmap_area *va,
> +		struct rb_root *root, struct list_head *head,
> +		unsigned long size, unsigned long align,
> +		unsigned long vstart, unsigned long vend)
> +{
> +	unsigned long nva_start_addr;
> +	int ret;
> +
> +	if (va->va_start > vstart)
> +		nva_start_addr = ALIGN(va->va_start, align);
> +	else
> +		nva_start_addr = ALIGN(vstart, align);
> +
> +	/* Check the "vend" restriction. */
> +	if (nva_start_addr + size > vend)
> +		return vend;
> +
> +	/* Update the free vmap_area. */
> +	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
> +	if (WARN_ON_ONCE(ret))
> +		return vend;
> +
> +	return nva_start_addr;
> +}
> +
>  /*
>   * Returns a start address of the newly allocated area, if success.
>   * Otherwise a vend is returned that indicates failure.
> @@ -1493,7 +1519,6 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
>  	bool adjust_search_size = true;
>  	unsigned long nva_start_addr;
>  	struct vmap_area *va;
> -	int ret;
>
>  	/*
>  	 * Do not adjust when:
> @@ -1511,18 +1536,8 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
>  	if (unlikely(!va))
>  		return vend;
>
> -	if (va->va_start > vstart)
> -		nva_start_addr = ALIGN(va->va_start, align);
> -	else
> -		nva_start_addr = ALIGN(vstart, align);
> -
> -	/* Check the "vend" restriction. */
> -	if (nva_start_addr + size > vend)
> -		return vend;
> -
> -	/* Update the free vmap_area. */
> -	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
> -	if (WARN_ON_ONCE(ret))
> +	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
> +	if (nva_start_addr == vend)
>  		return vend;
>
>  #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
> --
> 2.30.2

Reviewed-by: Baoquan He <bhe@redhat.com>
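To make the factored-out logic easier to follow outside the kernel tree, here is a minimal userspace sketch of the clamp-and-align step that va_alloc() now owns. Everything in it is a simplified stand-in: `free_area`, `pick_start()` and the demo addresses are hypothetical, the `ALIGN` macro is the usual power-of-two round-up idiom (same effect as the kernel macro for power-of-two alignments), and the rb-tree/list carving done by adjust_va_to_fit_type() is deliberately omitted.

```c
#include <stdio.h>

/* Power-of-two round-up, equivalent in effect to the kernel's ALIGN(). */
#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

/* Simplified stand-in for struct vmap_area. */
struct free_area {
	unsigned long va_start;
	unsigned long va_end;
};

/*
 * Hypothetical pick_start() mirrors the clamp-and-align half of
 * va_alloc(): start at the free block or the requested lower bound,
 * whichever is higher, align up, then enforce the 'vend' limit.
 * Returns 'vend' as the failure sentinel, like the kernel code.
 */
static unsigned long pick_start(const struct free_area *fa,
				unsigned long size, unsigned long align,
				unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;

	if (fa->va_start > vstart)
		nva_start_addr = ALIGN(fa->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	return nva_start_addr;
}

int main(void)
{
	struct free_area fa = { .va_start = 0x1003, .va_end = 0x9000 };
	unsigned long addr = pick_start(&fa, 0x1000, 0x100, 0x1000, 0x8000);

	if (addr == 0x8000)
		printf("allocation failed\n");
	else
		printf("allocated at 0x%lx\n", addr);	/* prints 0x1100 */
	return 0;
}
```

Built with a stock C compiler this prints "allocated at 0x1100": the free block's start 0x1003 sits above the requested lower bound, so it is rounded up to the next 0x100 boundary and the result is checked against the upper bound. The kernel version additionally calls adjust_va_to_fit_type() to carve the range out of the free tree, which this sketch leaves out.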
On Wed, Sep 06, 2023 at 01:51:03PM +0800, Baoquan He wrote:
> On 08/29/23 at 10:11am, Uladzislau Rezki (Sony) wrote:
> > Currently the __alloc_vmap_area() function contains open-coded
> > logic that finds and adjusts a VA based on an allocation request.
> >
> > Introduce a va_alloc() helper that only adjusts a found VA. It
> > will be used later in at least two places.
> >
> > There is no functional change as a result of this patch.
> >
> > Reviewed-by: Christoph Hellwig <hch@lst.de>
> > Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
> > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > ---
> >  mm/vmalloc.c | 41 ++++++++++++++++++++++++++++-------------
> >  1 file changed, 28 insertions(+), 13 deletions(-)
> >
> > diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> > index 93cf99aba335..00afc1ee4756 100644
> > --- a/mm/vmalloc.c
> > +++ b/mm/vmalloc.c
> > @@ -1481,6 +1481,32 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
> >  	return 0;
> >  }
> >
> > +static unsigned long
> > +va_alloc(struct vmap_area *va,
> > +		struct rb_root *root, struct list_head *head,
> > +		unsigned long size, unsigned long align,
> > +		unsigned long vstart, unsigned long vend)
> > +{
> > +	unsigned long nva_start_addr;
> > +	int ret;
> > +
> > +	if (va->va_start > vstart)
> > +		nva_start_addr = ALIGN(va->va_start, align);
> > +	else
> > +		nva_start_addr = ALIGN(vstart, align);
> > +
> > +	/* Check the "vend" restriction. */
> > +	if (nva_start_addr + size > vend)
> > +		return vend;
> > +
> > +	/* Update the free vmap_area. */
> > +	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
> > +	if (WARN_ON_ONCE(ret))
> > +		return vend;
> > +
> > +	return nva_start_addr;
> > +}
> > +
> >  /*
> >   * Returns a start address of the newly allocated area, if success.
> >   * Otherwise a vend is returned that indicates failure.
> > @@ -1493,7 +1519,6 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> >  	bool adjust_search_size = true;
> >  	unsigned long nva_start_addr;
> >  	struct vmap_area *va;
> > -	int ret;
> >
> >  	/*
> >  	 * Do not adjust when:
> > @@ -1511,18 +1536,8 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> >  	if (unlikely(!va))
> >  		return vend;
> >
> > -	if (va->va_start > vstart)
> > -		nva_start_addr = ALIGN(va->va_start, align);
> > -	else
> > -		nva_start_addr = ALIGN(vstart, align);
> > -
> > -	/* Check the "vend" restriction. */
> > -	if (nva_start_addr + size > vend)
> > -		return vend;
> > -
> > -	/* Update the free vmap_area. */
> > -	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
> > -	if (WARN_ON_ONCE(ret))
> > +	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
> > +	if (nva_start_addr == vend)
> >  		return vend;
> >
> >  #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
> > --
> > 2.30.2
>
> Reviewed-by: Baoquan He <bhe@redhat.com>
>
Thanks, I picked it for V3.

--
Uladzislau Rezki
```diff
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 93cf99aba335..00afc1ee4756 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1481,6 +1481,32 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
 	return 0;
 }
 
+static unsigned long
+va_alloc(struct vmap_area *va,
+		struct rb_root *root, struct list_head *head,
+		unsigned long size, unsigned long align,
+		unsigned long vstart, unsigned long vend)
+{
+	unsigned long nva_start_addr;
+	int ret;
+
+	if (va->va_start > vstart)
+		nva_start_addr = ALIGN(va->va_start, align);
+	else
+		nva_start_addr = ALIGN(vstart, align);
+
+	/* Check the "vend" restriction. */
+	if (nva_start_addr + size > vend)
+		return vend;
+
+	/* Update the free vmap_area. */
+	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
+	if (WARN_ON_ONCE(ret))
+		return vend;
+
+	return nva_start_addr;
+}
+
 /*
  * Returns a start address of the newly allocated area, if success.
  * Otherwise a vend is returned that indicates failure.
@@ -1493,7 +1519,6 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
 	bool adjust_search_size = true;
 	unsigned long nva_start_addr;
 	struct vmap_area *va;
-	int ret;
 
 	/*
 	 * Do not adjust when:
@@ -1511,18 +1536,8 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
 	if (unlikely(!va))
 		return vend;
 
-	if (va->va_start > vstart)
-		nva_start_addr = ALIGN(va->va_start, align);
-	else
-		nva_start_addr = ALIGN(vstart, align);
-
-	/* Check the "vend" restriction. */
-	if (nva_start_addr + size > vend)
-		return vend;
-
-	/* Update the free vmap_area. */
-	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
-	if (WARN_ON_ONCE(ret))
+	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
+	if (nva_start_addr == vend)
 		return vend;
 
 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
```
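A note on the error convention the helper preserves: a start address equal to `vend` can never be a valid result, because the allocated range must end at or below `vend`, so the upper bound itself doubles as the failure value and the caller's check collapses to a single comparison (`nva_start_addr == vend`) instead of a return code plus WARN_ON_ONCE(). Below is a small, self-contained sketch of that pattern; `try_alloc()` is a hypothetical stand-in for va_alloc(), not the kernel function.

```c
#include <stdio.h>

/*
 * Failure-sentinel convention: if the request does not fit in
 * [vstart, vend), return 'vend' itself, which no successful
 * allocation can ever produce as a start address.
 */
static unsigned long try_alloc(unsigned long size,
			       unsigned long vstart, unsigned long vend)
{
	if (vstart + size > vend)
		return vend;	/* does not fit: report failure */
	return vstart;
}

int main(void)
{
	unsigned long vend = 0x8000;
	unsigned long addr;

	/* A request too large for [vstart, vend) yields the sentinel. */
	addr = try_alloc(0x10000, 0x1000, vend);
	if (addr == vend)
		printf("allocation failed\n");
	else
		printf("allocated at 0x%lx\n", addr);
	return 0;
}
```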