Message ID | 20230522110849.2921-2-urezki@gmail.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | Mitigate a vmap lock contention | expand |
On Mon, May 22, 2023 at 01:08:41PM +0200, Uladzislau Rezki (Sony) wrote: > Currently __alloc_vmap_area() function contains an open codded > logic that finds and adjusts a VA based on allocation request. > > Introduce a va_alloc() helper that adjusts found VA only. It > will be used later at least in two places. > > There is no a functional change as a result of this patch. > > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com> > --- > mm/vmalloc.c | 41 ++++++++++++++++++++++++++++------------- > 1 file changed, 28 insertions(+), 13 deletions(-) > > diff --git a/mm/vmalloc.c b/mm/vmalloc.c > index 31ff782d368b..409285b68a67 100644 > --- a/mm/vmalloc.c > +++ b/mm/vmalloc.c > @@ -1482,6 +1482,32 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head, > return 0; > } > > +static unsigned long > +va_alloc(struct vmap_area *va, > + struct rb_root *root, struct list_head *head, > + unsigned long size, unsigned long align, > + unsigned long vstart, unsigned long vend) Prototype continuations don't use a single tab indent. Either two tabs (my preference) or after the opening brace. I.e.: static unsigned long va_alloc(struct vmap_area *va, struct rb_root *root, struct list_head *head, unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend) Otherwise looks good: Reviewed-by: Christoph Hellwig <hch@lst.de>
On Mon, May 22, 2023 at 11:05:57PM -0700, Christoph Hellwig wrote: > On Mon, May 22, 2023 at 01:08:41PM +0200, Uladzislau Rezki (Sony) wrote: > > Currently __alloc_vmap_area() function contains an open codded > > logic that finds and adjusts a VA based on allocation request. > > > > Introduce a va_alloc() helper that adjusts found VA only. It > > will be used later at least in two places. > > > > There is no a functional change as a result of this patch. > > > > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com> > > --- > > mm/vmalloc.c | 41 ++++++++++++++++++++++++++++------------- > > 1 file changed, 28 insertions(+), 13 deletions(-) > > > > diff --git a/mm/vmalloc.c b/mm/vmalloc.c > > index 31ff782d368b..409285b68a67 100644 > > --- a/mm/vmalloc.c > > +++ b/mm/vmalloc.c > > @@ -1482,6 +1482,32 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head, > > return 0; > > } > > > > +static unsigned long > > +va_alloc(struct vmap_area *va, > > + struct rb_root *root, struct list_head *head, > > + unsigned long size, unsigned long align, > > + unsigned long vstart, unsigned long vend) > > Prototype continuations don't use a single tab indent. Either two > tabs (my preference) or after the opening brace. I.e.: > Will go with two tabs. > static unsigned long > va_alloc(struct vmap_area *va, struct rb_root *root, struct list_head *head, > unsigned long size, unsigned long align, unsigned long vstart, > unsigned long vend) > > > Otherwise looks good: > > Reviewed-by: Christoph Hellwig <hch@lst.de> > Added. Appreciate you look at it! -- Uladizislau Rezki
On Mon, May 22, 2023 at 01:08:41PM +0200, Uladzislau Rezki (Sony) wrote: > Currently __alloc_vmap_area() function contains an open codded > logic that finds and adjusts a VA based on allocation request. > > Introduce a va_alloc() helper that adjusts found VA only. It > will be used later at least in two places. > > There is no a functional change as a result of this patch. > > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com> > --- > mm/vmalloc.c | 41 ++++++++++++++++++++++++++++------------- > 1 file changed, 28 insertions(+), 13 deletions(-) > > diff --git a/mm/vmalloc.c b/mm/vmalloc.c > index 31ff782d368b..409285b68a67 100644 > --- a/mm/vmalloc.c > +++ b/mm/vmalloc.c > @@ -1482,6 +1482,32 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head, > return 0; > } > > +static unsigned long > +va_alloc(struct vmap_area *va, > + struct rb_root *root, struct list_head *head, > + unsigned long size, unsigned long align, > + unsigned long vstart, unsigned long vend) > +{ > + unsigned long nva_start_addr; > + int ret; > + > + if (va->va_start > vstart) > + nva_start_addr = ALIGN(va->va_start, align); > + else > + nva_start_addr = ALIGN(vstart, align); > + > + /* Check the "vend" restriction. */ > + if (nva_start_addr + size > vend) > + return vend; > + > + /* Update the free vmap_area. */ > + ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size); > + if (WARN_ON_ONCE(ret)) > + return vend; > + > + return nva_start_addr; > +} > + > /* > * Returns a start address of the newly allocated area, if success. > * Otherwise a vend is returned that indicates failure. 
> @@ -1494,7 +1520,6 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head, > bool adjust_search_size = true; > unsigned long nva_start_addr; > struct vmap_area *va; > - int ret; > > /* > * Do not adjust when: > @@ -1512,18 +1537,8 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head, > if (unlikely(!va)) > return vend; > > - if (va->va_start > vstart) > - nva_start_addr = ALIGN(va->va_start, align); > - else > - nva_start_addr = ALIGN(vstart, align); > - > - /* Check the "vend" restriction. */ > - if (nva_start_addr + size > vend) > - return vend; > - > - /* Update the free vmap_area. */ > - ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size); > - if (WARN_ON_ONCE(ret)) > + nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); > + if (nva_start_addr == vend) > return vend; > > #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK > -- > 2.30.2 > LGTM, Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 31ff782d368b..409285b68a67 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1482,6 +1482,32 @@ adjust_va_to_fit_type(struct rb_root *root, struct list_head *head, return 0; } +static unsigned long +va_alloc(struct vmap_area *va, + struct rb_root *root, struct list_head *head, + unsigned long size, unsigned long align, + unsigned long vstart, unsigned long vend) +{ + unsigned long nva_start_addr; + int ret; + + if (va->va_start > vstart) + nva_start_addr = ALIGN(va->va_start, align); + else + nva_start_addr = ALIGN(vstart, align); + + /* Check the "vend" restriction. */ + if (nva_start_addr + size > vend) + return vend; + + /* Update the free vmap_area. */ + ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size); + if (WARN_ON_ONCE(ret)) + return vend; + + return nva_start_addr; +} + /* * Returns a start address of the newly allocated area, if success. * Otherwise a vend is returned that indicates failure. @@ -1494,7 +1520,6 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head, bool adjust_search_size = true; unsigned long nva_start_addr; struct vmap_area *va; - int ret; /* * Do not adjust when: @@ -1512,18 +1537,8 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head, if (unlikely(!va)) return vend; - if (va->va_start > vstart) - nva_start_addr = ALIGN(va->va_start, align); - else - nva_start_addr = ALIGN(vstart, align); - - /* Check the "vend" restriction. */ - if (nva_start_addr + size > vend) - return vend; - - /* Update the free vmap_area. */ - ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size); - if (WARN_ON_ONCE(ret)) + nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); + if (nva_start_addr == vend) return vend; #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
Currently the __alloc_vmap_area() function contains open-coded logic that finds and adjusts a VA based on an allocation request. Introduce a va_alloc() helper that adjusts a found VA only. It will be used later in at least two places. There is no functional change as a result of this patch. Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com> --- mm/vmalloc.c | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-)