| Message ID | 20230829081142.3619-3-urezki@gmail.com (mailing list archive) |
|---|---|
| State | New |
| Series | Mitigate a vmap lock contention v2 |
On 08/29/23 at 10:11am, Uladzislau Rezki (Sony) wrote:
> This patch renames the adjust_va_to_fit_type() function
> to va_clip() which is shorter and more expressive.
>
> There is no a functional change as a result of this patch.
>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> ---
>  mm/vmalloc.c | 13 ++++++-------
>  1 file changed, 6 insertions(+), 7 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 00afc1ee4756..09e315f8ea34 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1382,9 +1382,9 @@ classify_va_fit_type(struct vmap_area *va,
>  }
>
>  static __always_inline int
> -adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
> -		struct vmap_area *va, unsigned long nva_start_addr,
> -		unsigned long size)
> +va_clip(struct rb_root *root, struct list_head *head,
> +		struct vmap_area *va, unsigned long nva_start_addr,
> +		unsigned long size)
>  {
>  	struct vmap_area *lva = NULL;
>  	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
> @@ -1500,7 +1500,7 @@ va_alloc(struct vmap_area *va,
>  		return vend;
>
>  	/* Update the free vmap_area. */
> -	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
> +	ret = va_clip(root, head, va, nva_start_addr, size);
>  	if (WARN_ON_ONCE(ret))
>  		return vend;
>
> @@ -4151,9 +4151,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
>  			/* It is a BUG(), but trigger recovery instead. */
>  			goto recovery;
>
> -		ret = adjust_va_to_fit_type(&free_vmap_area_root,
> -					&free_vmap_area_list,
> -					va, start, size);
> +		ret = va_clip(&free_vmap_area_root,
> +				&free_vmap_area_list, va, start, size);
>  		if (WARN_ON_ONCE(unlikely(ret)))
>  			/* It is a BUG(), but trigger recovery instead. */
>  			goto recovery;
> --
> 2.30.2
>

Reviewed-by: Baoquan He <bhe@redhat.com>
On Wed, Sep 06, 2023 at 01:51:42PM +0800, Baoquan He wrote:
> On 08/29/23 at 10:11am, Uladzislau Rezki (Sony) wrote:
> > This patch renames the adjust_va_to_fit_type() function
> > to va_clip() which is shorter and more expressive.
> >
> > There is no a functional change as a result of this patch.
> >
> > Reviewed-by: Christoph Hellwig <hch@lst.de>
> > Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
> > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > ---
> >  mm/vmalloc.c | 13 ++++++-------
> >  1 file changed, 6 insertions(+), 7 deletions(-)
> >
> > diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> > index 00afc1ee4756..09e315f8ea34 100644
> > --- a/mm/vmalloc.c
> > +++ b/mm/vmalloc.c
> > @@ -1382,9 +1382,9 @@ classify_va_fit_type(struct vmap_area *va,
> >  }
> >
> >  static __always_inline int
> > -adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
> > -		struct vmap_area *va, unsigned long nva_start_addr,
> > -		unsigned long size)
> > +va_clip(struct rb_root *root, struct list_head *head,
> > +		struct vmap_area *va, unsigned long nva_start_addr,
> > +		unsigned long size)
> >  {
> >  	struct vmap_area *lva = NULL;
> >  	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
> > @@ -1500,7 +1500,7 @@ va_alloc(struct vmap_area *va,
> >  		return vend;
> >
> >  	/* Update the free vmap_area. */
> > -	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
> > +	ret = va_clip(root, head, va, nva_start_addr, size);
> >  	if (WARN_ON_ONCE(ret))
> >  		return vend;
> >
> > @@ -4151,9 +4151,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
> >  			/* It is a BUG(), but trigger recovery instead. */
> >  			goto recovery;
> >
> > -		ret = adjust_va_to_fit_type(&free_vmap_area_root,
> > -					&free_vmap_area_list,
> > -					va, start, size);
> > +		ret = va_clip(&free_vmap_area_root,
> > +				&free_vmap_area_list, va, start, size);
> >  		if (WARN_ON_ONCE(unlikely(ret)))
> >  			/* It is a BUG(), but trigger recovery instead. */
> >  			goto recovery;
> > --
> > 2.30.2
> >
>
> Reviewed-by: Baoquan He <bhe@redhat.com>

Thank you for the review. Picked it up.

--
Uladzislau Rezki
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 00afc1ee4756..09e315f8ea34 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1382,9 +1382,9 @@ classify_va_fit_type(struct vmap_area *va,
 }
 
 static __always_inline int
-adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
-		struct vmap_area *va, unsigned long nva_start_addr,
-		unsigned long size)
+va_clip(struct rb_root *root, struct list_head *head,
+		struct vmap_area *va, unsigned long nva_start_addr,
+		unsigned long size)
 {
 	struct vmap_area *lva = NULL;
 	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
@@ -1500,7 +1500,7 @@ va_alloc(struct vmap_area *va,
 		return vend;
 
 	/* Update the free vmap_area. */
-	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
+	ret = va_clip(root, head, va, nva_start_addr, size);
 	if (WARN_ON_ONCE(ret))
 		return vend;
 
@@ -4151,9 +4151,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 			/* It is a BUG(), but trigger recovery instead. */
 			goto recovery;
 
-		ret = adjust_va_to_fit_type(&free_vmap_area_root,
-					&free_vmap_area_list,
-					va, start, size);
+		ret = va_clip(&free_vmap_area_root,
+				&free_vmap_area_list, va, start, size);
 		if (WARN_ON_ONCE(unlikely(ret)))
 			/* It is a BUG(), but trigger recovery instead. */
 			goto recovery;
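For readers skimming the thread, the sketch below illustrates the operation that va_clip() (formerly adjust_va_to_fit_type()) performs: classify how a requested block [addr, addr + size) sits inside a free range, then shrink or split that range. This is a simplified, self-contained illustration, not the kernel code: struct range, classify_fit() and clip_range() are names invented for the example, and the real va_clip() operates on struct vmap_area nodes kept in an rb-tree/list and handles node preallocation and augmented-tree updates that are omitted here.

```c
#include <assert.h>
#include <stdio.h>

/* A free address range [start, end), stripped of the kernel's
 * rb-tree/list bookkeeping for the purpose of this sketch. */
struct range {
	unsigned long start;
	unsigned long end;
};

/* How a requested block [addr, addr + size) sits inside a free range. */
enum fit_type {
	NOTHING_FIT,	/* request does not fit into the range */
	FL_FIT,		/* full fit: request covers the whole range */
	LE_FIT,		/* left-edge fit: request starts at range->start */
	RE_FIT,		/* right-edge fit: request ends at range->end */
	NE_FIT,		/* no-edge fit: request is in the middle, range splits */
};

static enum fit_type classify_fit(const struct range *r,
				  unsigned long addr, unsigned long size)
{
	if (addr < r->start || addr + size > r->end)
		return NOTHING_FIT;
	if (addr == r->start)
		return (addr + size == r->end) ? FL_FIT : LE_FIT;
	if (addr + size == r->end)
		return RE_FIT;
	return NE_FIT;
}

/*
 * Clip the requested block out of the free range.  On a middle fit the
 * remaining free space is split in two; *split receives the new
 * right-hand remainder.  Returns 0 on success, -1 if the block does
 * not fit.
 */
static int clip_range(struct range *r, struct range *split,
		      unsigned long addr, unsigned long size)
{
	switch (classify_fit(r, addr, size)) {
	case FL_FIT:
		r->start = r->end = 0;		/* free range fully consumed */
		return 0;
	case LE_FIT:
		r->start = addr + size;		/* shrink from the left */
		return 0;
	case RE_FIT:
		r->end = addr;			/* shrink from the right */
		return 0;
	case NE_FIT:
		split->start = addr + size;	/* right remainder */
		split->end = r->end;
		r->end = addr;			/* left remainder */
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	struct range r = { 0x1000, 0x9000 }, split = { 0, 0 };

	/* Carve 0x2000 bytes out of the middle: the free range splits. */
	assert(clip_range(&r, &split, 0x4000, 0x2000) == 0);
	printf("left  remainder: [0x%lx, 0x%lx)\n", r.start, r.end);
	printf("right remainder: [0x%lx, 0x%lx)\n", split.start, split.end);
	return 0;
}
```

The middle (NE_FIT) case is the interesting one: the free area must be split into two remainders, which in the kernel requires a second vmap_area node. That allocation can fail, which is why va_clip() can return an error and why its callers warn and fall back to the recovery path visible in the pcpu_get_vm_areas() hunk above.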