| Message ID | 20230829081142.3619-4-urezki@gmail.com (mailing list archive) |
|---|---|
| State | New |
| Series | Mitigate a vmap lock contention v2 |
On 08/29/23 at 10:11am, Uladzislau Rezki (Sony) wrote:
> A vmap_init_free_space() is a function that setups a vmap space
> and is considered as part of initialization phase. Since a main
> entry which is vmalloc_init(), has been moved down in vmalloc.c
> it makes sense to follow the pattern.
>
> There is no a functional change as a result of this patch.
>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> ---
>  mm/vmalloc.c | 82 ++++++++++++++++++++++++++--------------------------
>  1 file changed, 41 insertions(+), 41 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 09e315f8ea34..b7deacca1483 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2512,47 +2512,6 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
>  	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
>  }
>  
> -static void vmap_init_free_space(void)
> -{
> -	unsigned long vmap_start = 1;
> -	const unsigned long vmap_end = ULONG_MAX;
> -	struct vmap_area *busy, *free;
> -
> -	/*
> -	 *     B     F     B     B     B     F
> -	 * -|-----|.....|-----|-----|-----|.....|-
> -	 *  |           The KVA space           |
> -	 *  |<--------------------------------->|
> -	 */
> -	list_for_each_entry(busy, &vmap_area_list, list) {
> -		if (busy->va_start - vmap_start > 0) {
> -			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
> -			if (!WARN_ON_ONCE(!free)) {
> -				free->va_start = vmap_start;
> -				free->va_end = busy->va_start;
> -
> -				insert_vmap_area_augment(free, NULL,
> -					&free_vmap_area_root,
> -						&free_vmap_area_list);
> -			}
> -		}
> -
> -		vmap_start = busy->va_end;
> -	}
> -
> -	if (vmap_end - vmap_start > 0) {
> -		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
> -		if (!WARN_ON_ONCE(!free)) {
> -			free->va_start = vmap_start;
> -			free->va_end = vmap_end;
> -
> -			insert_vmap_area_augment(free, NULL,
> -				&free_vmap_area_root,
> -					&free_vmap_area_list);
> -		}
> -	}
> -}
> -
>  static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
>  	struct vmap_area *va, unsigned long flags, const void *caller)
>  {
> @@ -4443,6 +4402,47 @@ module_init(proc_vmalloc_init);
>  
>  #endif
>  
> +static void vmap_init_free_space(void)
> +{
> +	unsigned long vmap_start = 1;
> +	const unsigned long vmap_end = ULONG_MAX;
> +	struct vmap_area *busy, *free;
> +
> +	/*
> +	 *     B     F     B     B     B     F
> +	 * -|-----|.....|-----|-----|-----|.....|-
> +	 *  |           The KVA space           |
> +	 *  |<--------------------------------->|
> +	 */
> +	list_for_each_entry(busy, &vmap_area_list, list) {
> +		if (busy->va_start - vmap_start > 0) {
> +			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
> +			if (!WARN_ON_ONCE(!free)) {
> +				free->va_start = vmap_start;
> +				free->va_end = busy->va_start;
> +
> +				insert_vmap_area_augment(free, NULL,
> +					&free_vmap_area_root,
> +						&free_vmap_area_list);
> +			}
> +		}
> +
> +		vmap_start = busy->va_end;
> +	}
> +
> +	if (vmap_end - vmap_start > 0) {
> +		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
> +		if (!WARN_ON_ONCE(!free)) {
> +			free->va_start = vmap_start;
> +			free->va_end = vmap_end;
> +
> +			insert_vmap_area_augment(free, NULL,
> +				&free_vmap_area_root,
> +					&free_vmap_area_list);
> +		}
> +	}
> +}
> +
>  void __init vmalloc_init(void)
>  {
>  	struct vmap_area *va;
> --
> 2.30.2
>

Reviewed-by: Baoquan He <bhe@redhat.com>
On Wed, Sep 06, 2023 at 01:52:08PM +0800, Baoquan He wrote:
> On 08/29/23 at 10:11am, Uladzislau Rezki (Sony) wrote:
> > A vmap_init_free_space() is a function that setups a vmap space
> > and is considered as part of initialization phase. Since a main
> > entry which is vmalloc_init(), has been moved down in vmalloc.c
> > it makes sense to follow the pattern.
> >
> > There is no a functional change as a result of this patch.
> >
> > Reviewed-by: Christoph Hellwig <hch@lst.de>
> > Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
> > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > ---
> >  mm/vmalloc.c | 82 ++++++++++++++++++++++++++--------------------------
> >  1 file changed, 41 insertions(+), 41 deletions(-)
> >
> > diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> > index 09e315f8ea34..b7deacca1483 100644
> > --- a/mm/vmalloc.c
> > +++ b/mm/vmalloc.c
> > @@ -2512,47 +2512,6 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
> >  	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
> >  }
> >  
> > -static void vmap_init_free_space(void)
> > -{
> > -	unsigned long vmap_start = 1;
> > -	const unsigned long vmap_end = ULONG_MAX;
> > -	struct vmap_area *busy, *free;
> > -
> > -	/*
> > -	 *     B     F     B     B     B     F
> > -	 * -|-----|.....|-----|-----|-----|.....|-
> > -	 *  |           The KVA space           |
> > -	 *  |<--------------------------------->|
> > -	 */
> > -	list_for_each_entry(busy, &vmap_area_list, list) {
> > -		if (busy->va_start - vmap_start > 0) {
> > -			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
> > -			if (!WARN_ON_ONCE(!free)) {
> > -				free->va_start = vmap_start;
> > -				free->va_end = busy->va_start;
> > -
> > -				insert_vmap_area_augment(free, NULL,
> > -					&free_vmap_area_root,
> > -						&free_vmap_area_list);
> > -			}
> > -		}
> > -
> > -		vmap_start = busy->va_end;
> > -	}
> > -
> > -	if (vmap_end - vmap_start > 0) {
> > -		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
> > -		if (!WARN_ON_ONCE(!free)) {
> > -			free->va_start = vmap_start;
> > -			free->va_end = vmap_end;
> > -
> > -			insert_vmap_area_augment(free, NULL,
> > -				&free_vmap_area_root,
> > -					&free_vmap_area_list);
> > -		}
> > -	}
> > -}
> > -
> >  static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
> >  	struct vmap_area *va, unsigned long flags, const void *caller)
> >  {
> > @@ -4443,6 +4402,47 @@ module_init(proc_vmalloc_init);
> >  
> >  #endif
> >  
> > +static void vmap_init_free_space(void)
> > +{
> > +	unsigned long vmap_start = 1;
> > +	const unsigned long vmap_end = ULONG_MAX;
> > +	struct vmap_area *busy, *free;
> > +
> > +	/*
> > +	 *     B     F     B     B     B     F
> > +	 * -|-----|.....|-----|-----|-----|.....|-
> > +	 *  |           The KVA space           |
> > +	 *  |<--------------------------------->|
> > +	 */
> > +	list_for_each_entry(busy, &vmap_area_list, list) {
> > +		if (busy->va_start - vmap_start > 0) {
> > +			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
> > +			if (!WARN_ON_ONCE(!free)) {
> > +				free->va_start = vmap_start;
> > +				free->va_end = busy->va_start;
> > +
> > +				insert_vmap_area_augment(free, NULL,
> > +					&free_vmap_area_root,
> > +						&free_vmap_area_list);
> > +			}
> > +		}
> > +
> > +		vmap_start = busy->va_end;
> > +	}
> > +
> > +	if (vmap_end - vmap_start > 0) {
> > +		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
> > +		if (!WARN_ON_ONCE(!free)) {
> > +			free->va_start = vmap_start;
> > +			free->va_end = vmap_end;
> > +
> > +			insert_vmap_area_augment(free, NULL,
> > +				&free_vmap_area_root,
> > +					&free_vmap_area_list);
> > +		}
> > +	}
> > +}
> > +
> >  void __init vmalloc_init(void)
> >  {
> >  	struct vmap_area *va;
> > --
> > 2.30.2
> >
>
> Reviewed-by: Baoquan He <bhe@redhat.com>
>
Thanks!

--
Uladzislau Rezki
```diff
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 09e315f8ea34..b7deacca1483 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2512,47 +2512,6 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
 	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
 }
 
-static void vmap_init_free_space(void)
-{
-	unsigned long vmap_start = 1;
-	const unsigned long vmap_end = ULONG_MAX;
-	struct vmap_area *busy, *free;
-
-	/*
-	 *     B     F     B     B     B     F
-	 * -|-----|.....|-----|-----|-----|.....|-
-	 *  |           The KVA space           |
-	 *  |<--------------------------------->|
-	 */
-	list_for_each_entry(busy, &vmap_area_list, list) {
-		if (busy->va_start - vmap_start > 0) {
-			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
-			if (!WARN_ON_ONCE(!free)) {
-				free->va_start = vmap_start;
-				free->va_end = busy->va_start;
-
-				insert_vmap_area_augment(free, NULL,
-					&free_vmap_area_root,
-						&free_vmap_area_list);
-			}
-		}
-
-		vmap_start = busy->va_end;
-	}
-
-	if (vmap_end - vmap_start > 0) {
-		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
-		if (!WARN_ON_ONCE(!free)) {
-			free->va_start = vmap_start;
-			free->va_end = vmap_end;
-
-			insert_vmap_area_augment(free, NULL,
-				&free_vmap_area_root,
-					&free_vmap_area_list);
-		}
-	}
-}
-
 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
 	struct vmap_area *va, unsigned long flags, const void *caller)
 {
@@ -4443,6 +4402,47 @@ module_init(proc_vmalloc_init);
 
 #endif
 
+static void vmap_init_free_space(void)
+{
+	unsigned long vmap_start = 1;
+	const unsigned long vmap_end = ULONG_MAX;
+	struct vmap_area *busy, *free;
+
+	/*
+	 *     B     F     B     B     B     F
+	 * -|-----|.....|-----|-----|-----|.....|-
+	 *  |           The KVA space           |
+	 *  |<--------------------------------->|
+	 */
+	list_for_each_entry(busy, &vmap_area_list, list) {
+		if (busy->va_start - vmap_start > 0) {
+			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
+			if (!WARN_ON_ONCE(!free)) {
+				free->va_start = vmap_start;
+				free->va_end = busy->va_start;
+
+				insert_vmap_area_augment(free, NULL,
+					&free_vmap_area_root,
+						&free_vmap_area_list);
+			}
+		}
+
+		vmap_start = busy->va_end;
+	}
+
+	if (vmap_end - vmap_start > 0) {
+		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
+		if (!WARN_ON_ONCE(!free)) {
+			free->va_start = vmap_start;
+			free->va_end = vmap_end;
+
+			insert_vmap_area_augment(free, NULL,
+				&free_vmap_area_root,
+					&free_vmap_area_list);
+		}
+	}
+}
+
 void __init vmalloc_init(void)
 {
 	struct vmap_area *va;
```
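For readers who want to see the interval logic of vmap_init_free_space() in isolation: the function walks the busy vmap areas in ascending address order and records every hole between them, plus the tail up to the end of the address space, as a free area. Below is a minimal, self-contained userspace sketch of that gap-filling pass. It is not kernel code: the `struct range` type, the `fill_free_ranges()` helper, the example addresses, and the plain arrays are hypothetical stand-ins for `struct vmap_area`, `insert_vmap_area_augment()` and the augmented rbtree.

```c
/*
 * Sketch of the gap-filling pass done by vmap_init_free_space():
 * given sorted, non-overlapping busy ranges inside [1, ULONG_MAX),
 * emit the free ranges between them. Names and values are illustrative.
 */
#include <stdio.h>
#include <limits.h>

struct range {
	unsigned long start;	/* inclusive */
	unsigned long end;	/* exclusive */
};

/* Derive free ranges from busy ranges sorted by ascending start address. */
static size_t fill_free_ranges(const struct range *busy, size_t nr_busy,
			       struct range *free, size_t max_free)
{
	unsigned long cursor = 1;			/* vmap_start in the kernel */
	const unsigned long space_end = ULONG_MAX;	/* vmap_end in the kernel */
	size_t nr_free = 0;

	for (size_t i = 0; i < nr_busy; i++) {
		/* A gap before this busy range becomes a free range. */
		if (busy[i].start > cursor && nr_free < max_free) {
			free[nr_free].start = cursor;
			free[nr_free].end = busy[i].start;
			nr_free++;
		}
		/* Continue scanning from the end of the busy range. */
		cursor = busy[i].end;
	}

	/* The tail after the last busy range is also free. */
	if (space_end > cursor && nr_free < max_free) {
		free[nr_free].start = cursor;
		free[nr_free].end = space_end;
		nr_free++;
	}

	return nr_free;
}

int main(void)
{
	/* Two adjacent busy ranges (no gap between them) and one further away. */
	const struct range busy[] = {
		{ 0x1000, 0x2000 }, { 0x2000, 0x3000 }, { 0x8000, 0x9000 },
	};
	struct range free[8];
	size_t n = fill_free_ranges(busy, 3, free, 8);

	for (size_t i = 0; i < n; i++)
		printf("free: [0x%lx, 0x%lx)\n", free[i].start, free[i].end);
	return 0;
}
```

Run under these assumptions, the sketch prints three free ranges: the head before 0x1000, the hole between 0x3000 and 0x8000, and the tail up to ULONG_MAX, which mirrors the free areas the kernel inserts into free_vmap_area_root at boot. The kernel source tests `busy->va_start - vmap_start > 0`; the sketch uses a plain `>` comparison, which is equivalent for an address-sorted list.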