Message ID | 20250415023952.27850-6-bhe@redhat.com (mailing list archive) |
---|---|
State | New |
Series | mm/vmalloc.c: code cleanup and improvements |
On 04/15/25 at 10:39am, Baoquan He wrote:
> In codes of alloc_vmap_area(), it returns the upper bound 'vend' to
> indicate if the allocation is successful or failed. That is not very clear.
>
> Here change to return explicit error values and check them to judge if
> allocation is successful.
>
> IS_ERR_VALUE already uses unlikely() internally
  ^^^^^^^^^

Sorry, above line was added mistakenly in log draft, should be removed.

>
> Signed-off-by: Baoquan He <bhe@redhat.com>
> ---
>  mm/vmalloc.c | 34 +++++++++++++++++-----------------
>  1 file changed, 17 insertions(+), 17 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 3f38a232663b..5b21cd09b2b4 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head,
>  		 */
>  		lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
>  		if (!lva)
> -			return -1;
> +			return -ENOMEM;
>  	}
>
>  	/*
> @@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head,
>  		 */
>  		va->va_start = nva_start_addr + size;
>  	} else {
> -		return -1;
> +		return -EINVAL;
>  	}
>
>  	if (type != FL_FIT_TYPE) {
> @@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va,
>
>  	/* Check the "vend" restriction. */
>  	if (nva_start_addr + size > vend)
> -		return vend;
> +		return -ERANGE;
>
>  	/* Update the free vmap_area. */
>  	ret = va_clip(root, head, va, nva_start_addr, size);
> -	if (WARN_ON_ONCE(ret))
> -		return vend;
> +	if (ret)
> +		return ret;
>
>  	return nva_start_addr;
>  }
>
>  /*
>   * Returns a start address of the newly allocated area, if success.
> - * Otherwise a vend is returned that indicates failure.
> + * Otherwise an error value is returned that indicates failure.
>   */
>  static __always_inline unsigned long
>  __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> @@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
>
>  	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
>  	if (unlikely(!va))
> -		return vend;
> +		return -ENOENT;
>
>  	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
> -	if (nva_start_addr == vend)
> -		return vend;
>
>  #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
> -	find_vmap_lowest_match_check(root, head, size, align);
> +	if (!IS_ERR_VALUE(nva_start_addr))
> +		find_vmap_lowest_match_check(root, head, size, align);
>  #endif
>
>  	return nva_start_addr;
> @@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align,
>  	struct vmap_area *va;
>
>  	*vn_id = 0;
> -	*addr = vend;
> +	*addr = -EINVAL;
>
>  	/*
>  	 * Fallback to a global heap if not vmalloc or there
> @@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  	}
>
>  retry:
> -	if (addr == vend) {
> +	if (IS_ERR_VALUE(addr)) {
>  		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
>  		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
>  			size, align, vstart, vend);
>  		spin_unlock(&free_vmap_area_lock);
>  	}
>
> -	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
> +	trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
>
>  	/*
> -	 * If an allocation fails, the "vend" address is
> +	 * If an allocation fails, the error value is
>  	 * returned. Therefore trigger the overflow path.
>  	 */
> -	if (unlikely(addr == vend))
> +	if (IS_ERR_VALUE(addr))
>  		goto overflow;
>
>  	va->va_start = addr;
> @@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
>
>  			ret = va_clip(&free_vmap_area_root,
>  				&free_vmap_area_list, va, start, size);
> -			if (WARN_ON_ONCE(unlikely(ret)))
> -				/* It is a BUG(), but trigger recovery instead. */
> +			if ((unlikely(ret))) {
> +				WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret);
>  				goto recovery;
> +			}
>
>  			/* Allocated area. */
>  			va = vas[area];
> --
> 2.41.0
>
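For readers skimming the archive, the heart of the change is replacing the 'vend' sentinel with negative errno values stored in an unsigned long. A minimal, self-contained userspace sketch of the two conventions (not kernel code; MAX_ERRNO and IS_ERR_VALUE below are simplified stand-ins for the helpers in include/linux/err.h):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
/* simplified stand-in for the kernel's IS_ERR_VALUE() */
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

/* old style: return the upper bound 'vend' on failure */
static unsigned long alloc_old(unsigned long vstart, unsigned long vend, int fail)
{
	return fail ? vend : vstart;
}

/* new style: return a negative errno cast to unsigned long on failure */
static unsigned long alloc_new(unsigned long vstart, int fail)
{
	return fail ? (unsigned long)-ENOMEM : vstart;
}

int main(void)
{
	unsigned long vstart = 0x1000, vend = 0x9000;
	unsigned long addr;

	addr = alloc_old(vstart, vend, 1);
	printf("old: failed = %d\n", addr == vend);	/* caller must know vend */

	addr = alloc_new(vstart, 1);
	printf("new: failed = %d, errno = %ld\n",
	       IS_ERR_VALUE(addr), -(long)addr);	/* error is self-describing */
	return 0;
}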
On 4/15/2025 8:09 AM, Baoquan He wrote: > In codes of alloc_vmap_area(), it returns the upper bound 'vend' to > indicate if the allocation is successful or failed. That is not very clear. > > Here change to return explicit error values and check them to judge if > allocation is successful. > > IS_ERR_VALUE already uses unlikely() internally > > Signed-off-by: Baoquan He <bhe@redhat.com> > --- > mm/vmalloc.c | 34 +++++++++++++++++----------------- > 1 file changed, 17 insertions(+), 17 deletions(-) > > diff --git a/mm/vmalloc.c b/mm/vmalloc.c > index 3f38a232663b..5b21cd09b2b4 100644 > --- a/mm/vmalloc.c > +++ b/mm/vmalloc.c > @@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head, > */ > lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); > if (!lva) > - return -1; > + return -ENOMEM; > } > > /* > @@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head, > */ > va->va_start = nva_start_addr + size; > } else { > - return -1; > + return -EINVAL; > } Braces around return -EINVAL seem unnecessary. They can be dropped. > > if (type != FL_FIT_TYPE) { > @@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va, > > /* Check the "vend" restriction. */ > if (nva_start_addr + size > vend) > - return vend; > + return -ERANGE; > > /* Update the free vmap_area. */ > ret = va_clip(root, head, va, nva_start_addr, size); > - if (WARN_ON_ONCE(ret)) > - return vend; > + if (ret) > + return ret; Is it safe to remove the warning, or was it critical for debugging? > > return nva_start_addr; > } > > /* > * Returns a start address of the newly allocated area, if success. > - * Otherwise a vend is returned that indicates failure. > + * Otherwise an error value is returned that indicates failure. > */ > static __always_inline unsigned long > __alloc_vmap_area(struct rb_root *root, struct list_head *head, > @@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head, > > va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); > if (unlikely(!va)) > - return vend; > + return -ENOENT; > > nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); > - if (nva_start_addr == vend) > - return vend; > > #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK > - find_vmap_lowest_match_check(root, head, size, align); > + if (!IS_ERR_VALUE(nva_start_addr)) > + find_vmap_lowest_match_check(root, head, size, align); > #endif > > return nva_start_addr; > @@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align, > struct vmap_area *va; > > *vn_id = 0; > - *addr = vend; > + *addr = -EINVAL; > > /* > * Fallback to a global heap if not vmalloc or there > @@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size, > } > > retry: > - if (addr == vend) { > + if (IS_ERR_VALUE(addr)) { > preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); > addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, > size, align, vstart, vend); > spin_unlock(&free_vmap_area_lock); > } > > - trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend); > + trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr)); > > /* > - * If an allocation fails, the "vend" address is > + * If an allocation fails, the error value is > * returned. Therefore trigger the overflow path. 
> */ > - if (unlikely(addr == vend)) > + if (IS_ERR_VALUE(addr)) > goto overflow; > > va->va_start = addr; > @@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, > > ret = va_clip(&free_vmap_area_root, > &free_vmap_area_list, va, start, size); > - if (WARN_ON_ONCE(unlikely(ret))) > - /* It is a BUG(), but trigger recovery instead. */ > + if ((unlikely(ret))) { ^^ ^^ The extra parentheses are redundant and can be removed for clarity. > + WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret); > goto recovery; > + } > > /* Allocated area. */ > va = vas[area];
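Shivank's question about the dropped warning hinges on a detail of the old code: WARN_ON_ONCE(cond) evaluates to the tested condition, so it can sit directly in the if and both warn and branch; the patch keeps the branch and drops only the diagnostic. A rough, self-contained illustration with a simplified stand-in macro (not the real include/asm-generic/bug.h implementation):

#include <stdio.h>

/* simplified stand-in: warn once, then evaluate to the tested condition */
#define WARN_ON_ONCE(cond) ({						\
	static int warned;						\
	int c = !!(cond);						\
	if (c && !warned) {						\
		warned = 1;						\
		fprintf(stderr, "WARNING: hit at line %d\n", __LINE__);	\
	}								\
	c;								\
})

/* stand-in for va_clip() failing with -ENOMEM (-12) */
static int va_clip_stub(void)
{
	return -12;
}

int main(void)
{
	int ret = va_clip_stub();

	/* before the patch: the check both warns and takes the early-return path */
	if (WARN_ON_ONCE(ret))
		printf("old style: warned and bailed out with the sentinel\n");

	/* after the patch: same control flow, no diagnostic, errno is propagated */
	ret = va_clip_stub();
	if (ret)
		printf("new style: silently propagate %d\n", ret);

	return 0;
}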
On 04/15/25 at 12:52pm, Shivank Garg wrote:
> On 4/15/2025 8:09 AM, Baoquan He wrote:
> > In codes of alloc_vmap_area(), it returns the upper bound 'vend' to
> > indicate if the allocation is successful or failed. That is not very clear.
> >
> > Here change to return explicit error values and check them to judge if
> > allocation is successful.
> >
> > IS_ERR_VALUE already uses unlikely() internally
> >
> > Signed-off-by: Baoquan He <bhe@redhat.com>
> > ---
> >  mm/vmalloc.c | 34 +++++++++++++++++-----------------
> >  1 file changed, 17 insertions(+), 17 deletions(-)
> >
> > diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> > index 3f38a232663b..5b21cd09b2b4 100644
> > --- a/mm/vmalloc.c
> > +++ b/mm/vmalloc.c
> > @@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head,
> >  		 */
> >  		lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
> >  		if (!lva)
> > -			return -1;
> > +			return -ENOMEM;
> >  	}
> >
> >  	/*
> > @@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head,
> >  		 */
> >  		va->va_start = nva_start_addr + size;
> >  	} else {
> > -		return -1;
> > +		return -EINVAL;
> >  	}

Thanks for reviewing.

>
> Braces around return -EINVAL seem unnecessary.
> They can be dropped.

This complies with the coding style required in 3) Placing Braces and
Spaces of Documentation/process/coding-style.rst, because the other
branches contain multiple statements.

>
> >
> >  	if (type != FL_FIT_TYPE) {
> > @@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va,
> >
> >  	/* Check the "vend" restriction. */
> >  	if (nva_start_addr + size > vend)
> > -		return vend;
> > +		return -ERANGE;
> >
> >  	/* Update the free vmap_area. */
> >  	ret = va_clip(root, head, va, nva_start_addr, size);
> > -	if (WARN_ON_ONCE(ret))
> > -		return vend;
> > +	if (ret)
> > +		return ret;
>
> Is it safe to remove the warning, or was it critical for debugging?

This comes from a reported concern: va_clip() can fail because of
NOTHING_FIT or a kmem_cache_alloc() failure. The warning here could
cause confusion, misleading people into thinking vmap area management
has failed.

>
> >
> >  	return nva_start_addr;
> >  }
> >
> >  /*
> >   * Returns a start address of the newly allocated area, if success.
> > - * Otherwise a vend is returned that indicates failure.
> > + * Otherwise an error value is returned that indicates failure.
> >   */
> >  static __always_inline unsigned long
> >  __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> > @@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> >
> >  	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
> >  	if (unlikely(!va))
> > -		return vend;
> > +		return -ENOENT;
> >
> >  	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
> > -	if (nva_start_addr == vend)
> > -		return vend;
> >
> >  #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
> > -	find_vmap_lowest_match_check(root, head, size, align);
> > +	if (!IS_ERR_VALUE(nva_start_addr))
> > +		find_vmap_lowest_match_check(root, head, size, align);
> >  #endif
> >
> >  	return nva_start_addr;
> > @@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align,
> >  	struct vmap_area *va;
> >
> >  	*vn_id = 0;
> > -	*addr = vend;
> > +	*addr = -EINVAL;
> >
> >  	/*
> >  	 * Fallback to a global heap if not vmalloc or there
> > @@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
> >  	}
> >
> >  retry:
> > -	if (addr == vend) {
> > +	if (IS_ERR_VALUE(addr)) {
> >  		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
> >  		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
> >  			size, align, vstart, vend);
> >  		spin_unlock(&free_vmap_area_lock);
> >  	}
> >
> > -	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
> > +	trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
> >
> >  	/*
> > -	 * If an allocation fails, the "vend" address is
> > +	 * If an allocation fails, the error value is
> >  	 * returned. Therefore trigger the overflow path.
> >  	 */
> > -	if (unlikely(addr == vend))
> > +	if (IS_ERR_VALUE(addr))
> >  		goto overflow;
> >
> >  	va->va_start = addr;
> > @@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
> >
> >  			ret = va_clip(&free_vmap_area_root,
> >  				&free_vmap_area_list, va, start, size);
> > -			if (WARN_ON_ONCE(unlikely(ret)))
> > -				/* It is a BUG(), but trigger recovery instead. */
> > +			if ((unlikely(ret))) {
> ^^ ^^
> The extra parentheses are redundant and can be removed for clarity.

You are right, I will remove it. Thanks.

>
> > +				WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret);
> >  				goto recovery;
> > +			}
> >
> >  			/* Allocated area. */
> >  			va = vas[area];
>
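The coding-style rule Baoquan points to says that when any branch of an if/else needs braces, the single-statement branches get braces too. A purely hypothetical sketch (clip_area() is invented for illustration and is not from mm/vmalloc.c):

#include <errno.h>

/* hypothetical helper, not taken from mm/vmalloc.c */
static int clip_area(int fits, unsigned long start, unsigned long size,
		     unsigned long *out)
{
	if (fits) {
		*out = start + size;
		return 0;
	} else {
		/* single statement, but braced because the other branch is braced,
		 * per Documentation/process/coding-style.rst, section 3 */
		return -EINVAL;
	}
}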
On 4/15/2025 6:31 PM, Baoquan He wrote: > On 04/15/25 at 12:52pm, Shivank Garg wrote: >> On 4/15/2025 8:09 AM, Baoquan He wrote: >>> In codes of alloc_vmap_area(), it returns the upper bound 'vend' to >>> indicate if the allocation is successful or failed. That is not very clear. >>> >>> Here change to return explicit error values and check them to judge if >>> allocation is successful. >>> >>> IS_ERR_VALUE already uses unlikely() internally >>> >>> Signed-off-by: Baoquan He <bhe@redhat.com> >>> --- >>> mm/vmalloc.c | 34 +++++++++++++++++----------------- >>> 1 file changed, 17 insertions(+), 17 deletions(-) >>> >>> diff --git a/mm/vmalloc.c b/mm/vmalloc.c >>> index 3f38a232663b..5b21cd09b2b4 100644 >>> --- a/mm/vmalloc.c >>> +++ b/mm/vmalloc.c >>> @@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head, >>> */ >>> lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); >>> if (!lva) >>> - return -1; >>> + return -ENOMEM; >>> } >>> >>> /* >>> @@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head, >>> */ >>> va->va_start = nva_start_addr + size; >>> } else { >>> - return -1; >>> + return -EINVAL; >>> } > > Thanks for reviewing. > >> >> Braces around return -EINVAL seem unnecessary. >> They can be dropped. > > This complys with the codeing style required in 3) Placing Braces and > Spaces of Documentation/process/coding-style.rst because other branches > are multiple statements. > >> >>> >>> if (type != FL_FIT_TYPE) { >>> @@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va, >>> >>> /* Check the "vend" restriction. */ >>> if (nva_start_addr + size > vend) >>> - return vend; >>> + return -ERANGE; >>> >>> /* Update the free vmap_area. */ >>> ret = va_clip(root, head, va, nva_start_addr, size); >>> - if (WARN_ON_ONCE(ret)) >>> - return vend; >>> + if (ret) >>> + return ret; >> >> Is it safe to remove the warning, or was it critical for debugging? > > This comes from a reported concern because va_clip() could be failed by > NOTHING_FIT or kmem_cache_alloc failure. The warning here could cause > confusion misleading people to think vmap area management is failed. > >> >>> >>> return nva_start_addr; >>> } >>> >>> /* >>> * Returns a start address of the newly allocated area, if success. >>> - * Otherwise a vend is returned that indicates failure. >>> + * Otherwise an error value is returned that indicates failure. 
>>> */ >>> static __always_inline unsigned long >>> __alloc_vmap_area(struct rb_root *root, struct list_head *head, >>> @@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head, >>> >>> va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); >>> if (unlikely(!va)) >>> - return vend; >>> + return -ENOENT; >>> >>> nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); >>> - if (nva_start_addr == vend) >>> - return vend; >>> >>> #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK >>> - find_vmap_lowest_match_check(root, head, size, align); >>> + if (!IS_ERR_VALUE(nva_start_addr)) >>> + find_vmap_lowest_match_check(root, head, size, align); >>> #endif >>> >>> return nva_start_addr; >>> @@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align, >>> struct vmap_area *va; >>> >>> *vn_id = 0; >>> - *addr = vend; >>> + *addr = -EINVAL; >>> >>> /* >>> * Fallback to a global heap if not vmalloc or there >>> @@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size, >>> } >>> >>> retry: >>> - if (addr == vend) { >>> + if (IS_ERR_VALUE(addr)) { >>> preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); >>> addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, >>> size, align, vstart, vend); >>> spin_unlock(&free_vmap_area_lock); >>> } >>> >>> - trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend); >>> + trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr)); >>> >>> /* >>> - * If an allocation fails, the "vend" address is >>> + * If an allocation fails, the error value is >>> * returned. Therefore trigger the overflow path. >>> */ >>> - if (unlikely(addr == vend)) >>> + if (IS_ERR_VALUE(addr)) >>> goto overflow; >>> >>> va->va_start = addr; >>> @@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, >>> >>> ret = va_clip(&free_vmap_area_root, >>> &free_vmap_area_list, va, start, size); >>> - if (WARN_ON_ONCE(unlikely(ret))) >>> - /* It is a BUG(), but trigger recovery instead. */ >>> + if ((unlikely(ret))) { >> ^^ ^^ >> The extra parentheses are redundant and can be removed for clarity. > > You are right, I will remove it. Thanks. > Please feel free to add following in next version. Reviewed-by: Shivank Garg <shivankg@amd.com> Tested-by: Shivank Garg <shivankg@amd.com> Thanks, Shivank >> >>> + WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret); >>> goto recovery; >>> + } >>> >>> /* Allocated area. */ >>> va = vas[area]; >> >
On 04/16/25 at 12:30am, Shivank Garg wrote: > > > On 4/15/2025 6:31 PM, Baoquan He wrote: > > On 04/15/25 at 12:52pm, Shivank Garg wrote: > >> On 4/15/2025 8:09 AM, Baoquan He wrote: > >>> In codes of alloc_vmap_area(), it returns the upper bound 'vend' to > >>> indicate if the allocation is successful or failed. That is not very clear. > >>> > >>> Here change to return explicit error values and check them to judge if > >>> allocation is successful. > >>> > >>> IS_ERR_VALUE already uses unlikely() internally > >>> > >>> Signed-off-by: Baoquan He <bhe@redhat.com> > >>> --- > >>> mm/vmalloc.c | 34 +++++++++++++++++----------------- > >>> 1 file changed, 17 insertions(+), 17 deletions(-) > >>> > >>> diff --git a/mm/vmalloc.c b/mm/vmalloc.c > >>> index 3f38a232663b..5b21cd09b2b4 100644 > >>> --- a/mm/vmalloc.c > >>> +++ b/mm/vmalloc.c > >>> @@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head, > >>> */ > >>> lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); > >>> if (!lva) > >>> - return -1; > >>> + return -ENOMEM; > >>> } > >>> > >>> /* > >>> @@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head, > >>> */ > >>> va->va_start = nva_start_addr + size; > >>> } else { > >>> - return -1; > >>> + return -EINVAL; > >>> } > > > > Thanks for reviewing. > > > >> > >> Braces around return -EINVAL seem unnecessary. > >> They can be dropped. > > > > This complys with the codeing style required in 3) Placing Braces and > > Spaces of Documentation/process/coding-style.rst because other branches > > are multiple statements. > > > >> > >>> > >>> if (type != FL_FIT_TYPE) { > >>> @@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va, > >>> > >>> /* Check the "vend" restriction. */ > >>> if (nva_start_addr + size > vend) > >>> - return vend; > >>> + return -ERANGE; > >>> > >>> /* Update the free vmap_area. */ > >>> ret = va_clip(root, head, va, nva_start_addr, size); > >>> - if (WARN_ON_ONCE(ret)) > >>> - return vend; > >>> + if (ret) > >>> + return ret; > >> > >> Is it safe to remove the warning, or was it critical for debugging? > > > > This comes from a reported concern because va_clip() could be failed by > > NOTHING_FIT or kmem_cache_alloc failure. The warning here could cause > > confusion misleading people to think vmap area management is failed. > > > >> > >>> > >>> return nva_start_addr; > >>> } > >>> > >>> /* > >>> * Returns a start address of the newly allocated area, if success. > >>> - * Otherwise a vend is returned that indicates failure. > >>> + * Otherwise an error value is returned that indicates failure. 
> >>> */ > >>> static __always_inline unsigned long > >>> __alloc_vmap_area(struct rb_root *root, struct list_head *head, > >>> @@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head, > >>> > >>> va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); > >>> if (unlikely(!va)) > >>> - return vend; > >>> + return -ENOENT; > >>> > >>> nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); > >>> - if (nva_start_addr == vend) > >>> - return vend; > >>> > >>> #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK > >>> - find_vmap_lowest_match_check(root, head, size, align); > >>> + if (!IS_ERR_VALUE(nva_start_addr)) > >>> + find_vmap_lowest_match_check(root, head, size, align); > >>> #endif > >>> > >>> return nva_start_addr; > >>> @@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align, > >>> struct vmap_area *va; > >>> > >>> *vn_id = 0; > >>> - *addr = vend; > >>> + *addr = -EINVAL; > >>> > >>> /* > >>> * Fallback to a global heap if not vmalloc or there > >>> @@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size, > >>> } > >>> > >>> retry: > >>> - if (addr == vend) { > >>> + if (IS_ERR_VALUE(addr)) { > >>> preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); > >>> addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, > >>> size, align, vstart, vend); > >>> spin_unlock(&free_vmap_area_lock); > >>> } > >>> > >>> - trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend); > >>> + trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr)); > >>> > >>> /* > >>> - * If an allocation fails, the "vend" address is > >>> + * If an allocation fails, the error value is > >>> * returned. Therefore trigger the overflow path. > >>> */ > >>> - if (unlikely(addr == vend)) > >>> + if (IS_ERR_VALUE(addr)) > >>> goto overflow; > >>> > >>> va->va_start = addr; > >>> @@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, > >>> > >>> ret = va_clip(&free_vmap_area_root, > >>> &free_vmap_area_list, va, start, size); > >>> - if (WARN_ON_ONCE(unlikely(ret))) > >>> - /* It is a BUG(), but trigger recovery instead. */ > >>> + if ((unlikely(ret))) { > >> ^^ ^^ > >> The extra parentheses are redundant and can be removed for clarity. > > > > You are right, I will remove it. Thanks. > > > > Please feel free to add following in next version. > > Reviewed-by: Shivank Garg <shivankg@amd.com> > Tested-by: Shivank Garg <shivankg@amd.com> Thanks a lot for your careful reviewing and testing. > > >> > >>> + WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret); > >>> goto recovery; > >>> + } > >>> > >>> /* Allocated area. */ > >>> va = vas[area]; > >> > > >
On Tue, Apr 15, 2025 at 10:39:52AM +0800, Baoquan He wrote: > In codes of alloc_vmap_area(), it returns the upper bound 'vend' to > indicate if the allocation is successful or failed. That is not very clear. > > Here change to return explicit error values and check them to judge if > allocation is successful. > > IS_ERR_VALUE already uses unlikely() internally > > Signed-off-by: Baoquan He <bhe@redhat.com> > --- > mm/vmalloc.c | 34 +++++++++++++++++----------------- > 1 file changed, 17 insertions(+), 17 deletions(-) > > diff --git a/mm/vmalloc.c b/mm/vmalloc.c > index 3f38a232663b..5b21cd09b2b4 100644 > --- a/mm/vmalloc.c > +++ b/mm/vmalloc.c > @@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head, > */ > lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); > if (!lva) > - return -1; > + return -ENOMEM; > } > > /* > @@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head, > */ > va->va_start = nva_start_addr + size; > } else { > - return -1; > + return -EINVAL; > } > > if (type != FL_FIT_TYPE) { > @@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va, > > /* Check the "vend" restriction. */ > if (nva_start_addr + size > vend) > - return vend; > + return -ERANGE; > > /* Update the free vmap_area. */ > ret = va_clip(root, head, va, nva_start_addr, size); > - if (WARN_ON_ONCE(ret)) > - return vend; > Not clear why you remove this WARN_ON by this patch. It should be a separate patch or just keep it as is. The warning here can mean that something is really wrong, especially if NOTHING_FIT. So we definitely want the warning. > + if (ret) > + return ret; > > return nva_start_addr; > } > > /* > * Returns a start address of the newly allocated area, if success. > - * Otherwise a vend is returned that indicates failure. > + * Otherwise an error value is returned that indicates failure. > */ > static __always_inline unsigned long > __alloc_vmap_area(struct rb_root *root, struct list_head *head, > @@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head, > > va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); > if (unlikely(!va)) > - return vend; > + return -ENOENT; > > nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); > - if (nva_start_addr == vend) > - return vend; > > #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK > - find_vmap_lowest_match_check(root, head, size, align); > + if (!IS_ERR_VALUE(nva_start_addr)) > Just keep it as it was. No need to check if addr is valid or not. > + find_vmap_lowest_match_check(root, head, size, align); > #endif > > return nva_start_addr; > @@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align, > struct vmap_area *va; > > *vn_id = 0; > - *addr = vend; > + *addr = -EINVAL; > > /* > * Fallback to a global heap if not vmalloc or there > @@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size, > } > > retry: > - if (addr == vend) { > + if (IS_ERR_VALUE(addr)) { > preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); > addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, > size, align, vstart, vend); > spin_unlock(&free_vmap_area_lock); > } > > - trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend); > + trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr)); > > /* > - * If an allocation fails, the "vend" address is > + * If an allocation fails, the error value is > * returned. Therefore trigger the overflow path. 
> */ > - if (unlikely(addr == vend)) > + if (IS_ERR_VALUE(addr)) > goto overflow; > > va->va_start = addr; > @@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, > > ret = va_clip(&free_vmap_area_root, > &free_vmap_area_list, va, start, size); > - if (WARN_ON_ONCE(unlikely(ret))) > - /* It is a BUG(), but trigger recovery instead. */ Keep the comment. > + if ((unlikely(ret))) { > + WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret); > goto recovery; > + } > -- Uladzislau Rezki
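On the pcpu_get_vm_areas() hunk, the switch from WARN_ON_ONCE(ret) to WARN_ONCE(1, ...) also changes what gets printed: the new form reports which errno sent the code down the recovery path, which a bare WARN_ON_ONCE(ret) cannot. A rough userspace stand-in for the once-only formatted warning (not the real include/asm-generic/bug.h macro):

#include <stdio.h>

/* simplified stand-in for WARN_ONCE(): print the message once, return cond */
#define WARN_ONCE(cond, ...) ({				\
	static int warned;				\
	int c = !!(cond);				\
	if (c && !warned) {				\
		warned = 1;				\
		fprintf(stderr, __VA_ARGS__);		\
	}						\
	c;						\
})

static int va_clip_stub(void)
{
	return -12;	/* pretend va_clip() failed with -ENOMEM */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		int ret = va_clip_stub();

		if (ret) {
			/* the formatted message names the errno; printed only once */
			WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret);
			/* goto recovery; in the real code */
		}
	}
	return 0;
}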
On 04/16/25 at 04:28pm, Uladzislau Rezki wrote: > On Tue, Apr 15, 2025 at 10:39:52AM +0800, Baoquan He wrote: > > In codes of alloc_vmap_area(), it returns the upper bound 'vend' to > > indicate if the allocation is successful or failed. That is not very clear. > > > > Here change to return explicit error values and check them to judge if > > allocation is successful. > > > > IS_ERR_VALUE already uses unlikely() internally > > > > Signed-off-by: Baoquan He <bhe@redhat.com> > > --- > > mm/vmalloc.c | 34 +++++++++++++++++----------------- > > 1 file changed, 17 insertions(+), 17 deletions(-) > > > > diff --git a/mm/vmalloc.c b/mm/vmalloc.c > > index 3f38a232663b..5b21cd09b2b4 100644 > > --- a/mm/vmalloc.c > > +++ b/mm/vmalloc.c > > @@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head, > > */ > > lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); > > if (!lva) > > - return -1; > > + return -ENOMEM; > > } > > > > /* > > @@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head, > > */ > > va->va_start = nva_start_addr + size; > > } else { > > - return -1; > > + return -EINVAL; > > } > > > > if (type != FL_FIT_TYPE) { > > @@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va, > > > > /* Check the "vend" restriction. */ > > if (nva_start_addr + size > vend) > > - return vend; > > + return -ERANGE; > > > > /* Update the free vmap_area. */ > > ret = va_clip(root, head, va, nva_start_addr, size); > > - if (WARN_ON_ONCE(ret)) > > - return vend; > > > Not clear why you remove this WARN_ON by this patch. It should be > a separate patch or just keep it as is. The warning here can mean > that something is really wrong, especially if NOTHING_FIT. So we > definitely want the warning. I remember one time someone reported that the slab allocation failure triggered this warning which is confusing to them. But yes, it should be discussed in a separate post or thread, not appropriate to remove it silently. I will add it back in v2. > > > + if (ret) > > + return ret; > > > > return nva_start_addr; > > } > > > > /* > > * Returns a start address of the newly allocated area, if success. > > - * Otherwise a vend is returned that indicates failure. > > + * Otherwise an error value is returned that indicates failure. > > */ > > static __always_inline unsigned long > > __alloc_vmap_area(struct rb_root *root, struct list_head *head, > > @@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head, > > > > va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); > > if (unlikely(!va)) > > - return vend; > > + return -ENOENT; > > > > nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); > > - if (nva_start_addr == vend) > > - return vend; > > > > #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK > > - find_vmap_lowest_match_check(root, head, size, align); > > + if (!IS_ERR_VALUE(nva_start_addr)) > > > Just keep it as it was. No need to check if addr is valid or not. This is to keep consistent with the old code. Before this patch, if va_alloc() return vend, it returns directly, no find_vmap_lowest_match_check() invocation is done. I tried to keep the behaviour unchanged. That code is for debugging, both is fine to me. 
> > > + find_vmap_lowest_match_check(root, head, size, align); > > #endif > > > > return nva_start_addr; > > @@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align, > > struct vmap_area *va; > > > > *vn_id = 0; > > - *addr = vend; > > + *addr = -EINVAL; > > > > /* > > * Fallback to a global heap if not vmalloc or there > > @@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size, > > } > > > > retry: > > - if (addr == vend) { > > + if (IS_ERR_VALUE(addr)) { > > preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); > > addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, > > size, align, vstart, vend); > > spin_unlock(&free_vmap_area_lock); > > } > > > > - trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend); > > + trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr)); > > > > /* > > - * If an allocation fails, the "vend" address is > > + * If an allocation fails, the error value is > > * returned. Therefore trigger the overflow path. > > */ > > - if (unlikely(addr == vend)) > > + if (IS_ERR_VALUE(addr)) > > goto overflow; > > > > va->va_start = addr; > > @@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, > > > > ret = va_clip(&free_vmap_area_root, > > &free_vmap_area_list, va, start, size); > > - if (WARN_ON_ONCE(unlikely(ret))) > > - /* It is a BUG(), but trigger recovery instead. */ > Keep the comment. OK, will add it back. > > > + if ((unlikely(ret))) { > > + WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret); > > goto recovery; > > + } > > > > -- > Uladzislau Rezki >
On Thu, Apr 17, 2025 at 11:02:06AM +0800, Baoquan He wrote: > On 04/16/25 at 04:28pm, Uladzislau Rezki wrote: > > On Tue, Apr 15, 2025 at 10:39:52AM +0800, Baoquan He wrote: > > > In codes of alloc_vmap_area(), it returns the upper bound 'vend' to > > > indicate if the allocation is successful or failed. That is not very clear. > > > > > > Here change to return explicit error values and check them to judge if > > > allocation is successful. > > > > > > IS_ERR_VALUE already uses unlikely() internally > > > > > > Signed-off-by: Baoquan He <bhe@redhat.com> > > > --- > > > mm/vmalloc.c | 34 +++++++++++++++++----------------- > > > 1 file changed, 17 insertions(+), 17 deletions(-) > > > > > > diff --git a/mm/vmalloc.c b/mm/vmalloc.c > > > index 3f38a232663b..5b21cd09b2b4 100644 > > > --- a/mm/vmalloc.c > > > +++ b/mm/vmalloc.c > > > @@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head, > > > */ > > > lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); > > > if (!lva) > > > - return -1; > > > + return -ENOMEM; > > > } > > > > > > /* > > > @@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head, > > > */ > > > va->va_start = nva_start_addr + size; > > > } else { > > > - return -1; > > > + return -EINVAL; > > > } > > > > > > if (type != FL_FIT_TYPE) { > > > @@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va, > > > > > > /* Check the "vend" restriction. */ > > > if (nva_start_addr + size > vend) > > > - return vend; > > > + return -ERANGE; > > > > > > /* Update the free vmap_area. */ > > > ret = va_clip(root, head, va, nva_start_addr, size); > > > - if (WARN_ON_ONCE(ret)) > > > - return vend; > > > > > Not clear why you remove this WARN_ON by this patch. It should be > > a separate patch or just keep it as is. The warning here can mean > > that something is really wrong, especially if NOTHING_FIT. So we > > definitely want the warning. > > I remember one time someone reported that the slab allocation failure > triggered this warning which is confusing to them. But yes, it should be > discussed in a separate post or thread, not appropriate to remove it > silently. I will add it back in v2. > Thanks! > > > > > + if (ret) > > > + return ret; > > > > > > return nva_start_addr; > > > } > > > > > > /* > > > * Returns a start address of the newly allocated area, if success. > > > - * Otherwise a vend is returned that indicates failure. > > > + * Otherwise an error value is returned that indicates failure. > > > */ > > > static __always_inline unsigned long > > > __alloc_vmap_area(struct rb_root *root, struct list_head *head, > > > @@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head, > > > > > > va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); > > > if (unlikely(!va)) > > > - return vend; > > > + return -ENOENT; > > > > > > nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); > > > - if (nva_start_addr == vend) > > > - return vend; > > > > > > #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK > > > - find_vmap_lowest_match_check(root, head, size, align); > > > + if (!IS_ERR_VALUE(nva_start_addr)) > > > > > Just keep it as it was. No need to check if addr is valid or not. > > This is to keep consistent with the old code. Before this patch, if > va_alloc() return vend, it returns directly, no > find_vmap_lowest_match_check() invocation is done. I tried to keep the > behaviour unchanged. That code is for debugging, both is fine to me. > Ack. Makes sense to keep same behaviour as it was/is. 
> > > > > + find_vmap_lowest_match_check(root, head, size, align); > > > #endif > > > > > > return nva_start_addr; > > > @@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align, > > > struct vmap_area *va; > > > > > > *vn_id = 0; > > > - *addr = vend; > > > + *addr = -EINVAL; > > > > > > /* > > > * Fallback to a global heap if not vmalloc or there > > > @@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size, > > > } > > > > > > retry: > > > - if (addr == vend) { > > > + if (IS_ERR_VALUE(addr)) { > > > preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); > > > addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, > > > size, align, vstart, vend); > > > spin_unlock(&free_vmap_area_lock); > > > } > > > > > > - trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend); > > > + trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr)); > > > > > > /* > > > - * If an allocation fails, the "vend" address is > > > + * If an allocation fails, the error value is > > > * returned. Therefore trigger the overflow path. > > > */ > > > - if (unlikely(addr == vend)) > > > + if (IS_ERR_VALUE(addr)) > > > goto overflow; > > > > > > va->va_start = addr; > > > @@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, > > > > > > ret = va_clip(&free_vmap_area_root, > > > &free_vmap_area_list, va, start, size); > > > - if (WARN_ON_ONCE(unlikely(ret))) > > > - /* It is a BUG(), but trigger recovery instead. */ > > Keep the comment. > > OK, will add it back. > Thank you. -- Uladzislau Rezki
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3f38a232663b..5b21cd09b2b4 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head,
 		 */
 		lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
 		if (!lva)
-			return -1;
+			return -ENOMEM;
 	}
 
 	/*
@@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head,
 		 */
 		va->va_start = nva_start_addr + size;
 	} else {
-		return -1;
+		return -EINVAL;
 	}
 
 	if (type != FL_FIT_TYPE) {
@@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va,
 
 	/* Check the "vend" restriction. */
 	if (nva_start_addr + size > vend)
-		return vend;
+		return -ERANGE;
 
 	/* Update the free vmap_area. */
 	ret = va_clip(root, head, va, nva_start_addr, size);
-	if (WARN_ON_ONCE(ret))
-		return vend;
+	if (ret)
+		return ret;
 
 	return nva_start_addr;
 }
 
 /*
  * Returns a start address of the newly allocated area, if success.
- * Otherwise a vend is returned that indicates failure.
+ * Otherwise an error value is returned that indicates failure.
  */
 static __always_inline unsigned long
 __alloc_vmap_area(struct rb_root *root, struct list_head *head,
@@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
 
 	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
 	if (unlikely(!va))
-		return vend;
+		return -ENOENT;
 
 	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
-	if (nva_start_addr == vend)
-		return vend;
 
 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
-	find_vmap_lowest_match_check(root, head, size, align);
+	if (!IS_ERR_VALUE(nva_start_addr))
+		find_vmap_lowest_match_check(root, head, size, align);
 #endif
 
 	return nva_start_addr;
@@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align,
 	struct vmap_area *va;
 
 	*vn_id = 0;
-	*addr = vend;
+	*addr = -EINVAL;
 
 	/*
 	 * Fallback to a global heap if not vmalloc or there
@@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	}
 
 retry:
-	if (addr == vend) {
+	if (IS_ERR_VALUE(addr)) {
 		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
 		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
 			size, align, vstart, vend);
 		spin_unlock(&free_vmap_area_lock);
 	}
 
-	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
+	trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
 
 	/*
-	 * If an allocation fails, the "vend" address is
+	 * If an allocation fails, the error value is
 	 * returned. Therefore trigger the overflow path.
 	 */
-	if (unlikely(addr == vend))
+	if (IS_ERR_VALUE(addr))
 		goto overflow;
 
 	va->va_start = addr;
@@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 
 			ret = va_clip(&free_vmap_area_root,
 				&free_vmap_area_list, va, start, size);
-			if (WARN_ON_ONCE(unlikely(ret)))
-				/* It is a BUG(), but trigger recovery instead. */
+			if ((unlikely(ret))) {
+				WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret);
 				goto recovery;
+			}
 
 			/* Allocated area. */
 			va = vas[area];
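One property the new return convention relies on: IS_ERR_VALUE() treats only the top MAX_ERRNO (4095) values of an unsigned long as errors, and start addresses handed out by the vmap allocator are not expected to land in that last window of the address space, so a valid address cannot be mistaken for an errno. A small standalone sketch of the boundary (simplified macro, 64-bit build, illustrative address only):

#include <stdio.h>

#define MAX_ERRNO	4095
/* simplified stand-in for IS_ERR_VALUE() from include/linux/err.h */
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	/* only the last MAX_ERRNO values of the address space mean "error" */
	printf("error window starts at 0x%lx\n", (unsigned long)-MAX_ERRNO);

	printf("-ENOMEM (-12) encodes as 0x%lx, IS_ERR_VALUE = %d\n",
	       (unsigned long)-12L, (int)IS_ERR_VALUE((unsigned long)-12L));

	/* a made-up address in a vmalloc-like range is not an error value */
	printf("0xffffc90000001000 -> IS_ERR_VALUE = %d\n",
	       (int)IS_ERR_VALUE(0xffffc90000001000UL));

	return 0;
}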
In codes of alloc_vmap_area(), it returns the upper bound 'vend' to
indicate if the allocation is successful or failed. That is not very clear.

Here change to return explicit error values and check them to judge if
allocation is successful.

IS_ERR_VALUE already uses unlikely() internally

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 mm/vmalloc.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)
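For reference, the stray commit-log line about unlikely() (which the author asks to drop earlier in the thread) alludes to the macro's own definition: in include/linux/err.h of recent kernels it reads roughly as below, so call sites such as if (IS_ERR_VALUE(addr)) already get the branch hint without wrapping it in unlikely() again.

#define MAX_ERRNO	4095

#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)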