Message ID | 20231120091214.150502-5-sxwjean@me.com
---|---
State | New
Series | supplement of slab removal
On Mon, Nov 20, 2023 at 6:13 PM <sxwjean@me.com> wrote:
>
> From: Xiongwei Song <xiongwei.song@windriver.com>
>
> Since the slab allocator has been removed, there are no users of slab
> merging except SLUB. This commit is mostly a revert of
> commit 423c929cbbec ("mm/slab_common: commonize slab merge logic").
>
> Also change the prefix of all slab-merge-related functions, variables
> and definitions from "slab/SLAB" to "slub/SLUB".

Could you please elaborate a little bit? I am not sure I understand
what the last two patches of this series are useful for.

- Why rename variable/function/macro names?
- Why move merge-related functions from slab_common.c to slub.c?
  (I mean, merging slab_common.c and slub.c into a single file might
  make sense, but why move only some parts of one into the other?)

> Signed-off-by: Xiongwei Song <xiongwei.song@windriver.com>
> ---
>  mm/slab.h        |   3 --
>  mm/slab_common.c |  98 ----------------------------------------------
>  mm/slub.c        | 100 ++++++++++++++++++++++++++++++++++++++++++++++-
>  3 files changed, 99 insertions(+), 102 deletions(-)
[...]
> --
> 2.34.1
On Mon, Nov 20, 2023 at 6:13 PM <sxwjean@me.com> wrote:
>
> From: Xiongwei Song <xiongwei.song@windriver.com>
>
> Since the slab allocator has been removed, there are no users of slab
> merging except SLUB. This commit is mostly a revert of
> commit 423c929cbbec ("mm/slab_common: commonize slab merge logic").
>
> Also change the prefix of all slab-merge-related functions, variables
> and definitions from "slab/SLAB" to "slub/SLUB".
>
> Signed-off-by: Xiongwei Song <xiongwei.song@windriver.com>
> ---
>  mm/slab.h        |   3 --
>  mm/slab_common.c |  98 ----------------------------------------------
>  mm/slub.c        | 100 ++++++++++++++++++++++++++++++++++++++++++++++-
>  3 files changed, 99 insertions(+), 102 deletions(-)
[...]
> +/*
> + * Merge control. If this is set then no merging of slab caches will occur.
> + */
> +static bool slub_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
> +
> +static int __init setup_slub_nomerge(char *str)
> +{
> +	slub_nomerge = true;
> +	return 1;
> +}
> +
> +static int __init setup_slub_merge(char *str)
> +{
> +	slub_nomerge = false;
> +	return 1;
> +}
> +
> +__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
> +__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);

FYI, this hunk breaks kernel builds:

In file included from ./include/linux/printk.h:6,
                 from ./include/asm-generic/bug.h:22,
                 from ./arch/x86/include/asm/bug.h:87,
                 from ./include/linux/bug.h:5,
                 from ./include/linux/mmdebug.h:5,
                 from ./include/linux/mm.h:6,
                 from mm/slub.c:13:
mm/slub.c:748:45: error: ‘setup_slab_nomerge’ undeclared here (not in a function); did you mean ‘setup_slub_nomerge’?
  748 | __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
      |                                             ^~~~~~~~~~~~~~~~~~
./include/linux/init.h:340:32: note: in definition of macro ‘__setup_param’
  340 |         = { __setup_str_##unique_id, fn, early }
      |                                      ^~
mm/slub.c:749:41: error: ‘setup_slab_merge’ undeclared here (not in a function); did you mean ‘setup_slub_merge’?
  749 | __setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
      |                                         ^~~~~~~~~~~~~~~~
./include/linux/init.h:340:32: note: in definition of macro ‘__setup_param’
  340 |         = { __setup_str_##unique_id, fn, early }
      |                                      ^~
  CC      kernel/time/ntp.o
mm/slub.c:742:19: warning: ‘setup_slub_merge’ defined but not used [-Wunused-function]
  742 | static int __init setup_slub_merge(char *str)
      |                   ^~~~~~~~~~~~~~~~
mm/slub.c:736:19: warning: ‘setup_slub_nomerge’ defined but not used [-Wunused-function]
  736 | static int __init setup_slub_nomerge(char *str)
      |                   ^~~~~~~~~~~~~~~~~~
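The errors above come from the third argument of __setup_param, which
still names the old setup_slab_* handlers even though the handler
functions themselves were renamed; the fn slot of the macro expansion
must reference a function that is actually declared. A minimal sketch of
the likely fix (assuming the handlers keep their new setup_slub_* names
and nothing else references the old ones):

	/* Point __setup_param at the renamed handlers; the first argument
	 * is the boot-command-line token, the third is the handler that
	 * runs when that token is seen. */
	__setup_param("slub_nomerge", slub_nomerge, setup_slub_nomerge, 0);
	__setup_param("slub_merge", slub_merge, setup_slub_merge, 0);

With that change, booting with slub_nomerge on the kernel command line
sets slub_nomerge = true and disables cache merging, as before.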
On 11/21/23 09:54, Hyeonggon Yoo wrote:
> On Mon, Nov 20, 2023 at 6:13 PM <sxwjean@me.com> wrote:
>>
>> From: Xiongwei Song <xiongwei.song@windriver.com>
>>
>> Since the slab allocator has been removed, there are no users of slab
>> merging except SLUB. This commit is mostly a revert of
>> commit 423c929cbbec ("mm/slab_common: commonize slab merge logic").
>>
>> Also change the prefix of all slab-merge-related functions, variables
>> and definitions from "slab/SLAB" to "slub/SLUB".
>
> Could you please elaborate a little bit? I am not sure I understand
> what the last two patches of this series are useful for.
>
> - Why rename variable/function/macro names?
> - Why move merge-related functions from slab_common.c to slub.c?

In my series I have moved functions that were part of the allocation/free
hot paths, as there should be performance benefits if they are all in the
same compilation unit.

> (I mean, merging slab_common.c and slub.c into a single file might
> make sense, but why move only some parts of one into the other?)

OTOH slub.c becomes quite big, so I think it would make sense not to
merge mm/slab_common.c fully. The non-hot code that handles e.g. cache
creation and management, such as what this patch is moving, could
certainly stay out of mm/slub.c. We could just pick a more descriptive
name for slab_common.c.

I'd even investigate whether more parts of slub.c could be split out (to
a new file or files) without compromising the hot paths, i.e. sysfs,
debugging, etc.
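For context on the compilation-unit point: a static function is visible
only within its own .c file, so the compiler can freely inline it into
callers there, whereas a call across translation units normally stays a
real call unless link-time optimization is enabled. A toy sketch, with
hypothetical names, of the pattern being preserved for hot paths:

	/* hot_path.c (hypothetical file) -- callee and caller live in one
	 * translation unit, so the compiler may inline merge_check() into
	 * hot_path() and eliminate the call overhead entirely. */
	static int merge_check(unsigned long flags)
	{
		return flags & 0x1;
	}

	int hot_path(unsigned long flags)
	{
		return merge_check(flags);	/* typically inlined */
	}

Cold code such as cache creation gains nothing from this, which is why
splitting it into a separately named file is compatible with the
performance argument above.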
> -----Original Message-----
> From: owner-linux-mm@kvack.org <owner-linux-mm@kvack.org> On Behalf Of
> Vlastimil Babka
> Sent: Tuesday, November 21, 2023 6:03 PM
> To: Hyeonggon Yoo <42.hyeyoo@gmail.com>; sxwjean@me.com
> Cc: cl@linux.com; penberg@kernel.org; rientjes@google.com;
> iamjoonsoo.kim@lge.com; roman.gushchin@linux.dev; corbet@lwn.net;
> linux-mm@kvack.org; linux-doc@vger.kernel.org; linux-kernel@vger.kernel.org
> Subject: Re: [PATCH 4/4] mm/slab: move slab merge from slab_common.c to slub.c
>
[...]
> OTOH slub.c becomes quite big, so I think it would make sense not to
> merge mm/slab_common.c fully. The non-hot code that handles e.g. cache
> creation and management, such as what this patch is moving, could
> certainly stay out of mm/slub.c. We could just pick a more descriptive
> name for slab_common.c.
>
> I'd even investigate whether more parts of slub.c could be split out (to
> a new file or files) without compromising the hot paths, i.e. sysfs,
> debugging, etc.

Ok, sure. Sounds good.

Regards,
Xiongwei
diff --git a/mm/slab.h b/mm/slab.h
index 8d20f8c6269d..cd52e705ce28 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -429,9 +429,6 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
 
 unsigned int calculate_alignment(slab_flags_t flags,
 		unsigned int align, unsigned int size);
-int slab_unmergeable(struct kmem_cache *s);
-struct kmem_cache *find_mergeable(unsigned size, unsigned align,
-		slab_flags_t flags, const char *name, void (*ctor)(void *));
 struct kmem_cache *
 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 		   slab_flags_t flags, void (*ctor)(void *));
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 62eb77fdedf2..6960ae5c35ee 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -45,36 +45,6 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
 static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
 		    slab_caches_to_rcu_destroy_workfn);
 
-/*
- * Set of flags that will prevent slab merging
- */
-#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
-		SLAB_FAILSLAB | SLAB_NO_MERGE | kasan_never_merge())
-
-#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
-			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
-
-/*
- * Merge control. If this is set then no merging of slab caches will occur.
- */
-static bool slub_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
-
-static int __init setup_slab_nomerge(char *str)
-{
-	slub_nomerge = true;
-	return 1;
-}
-
-static int __init setup_slab_merge(char *str)
-{
-	slub_nomerge = false;
-	return 1;
-}
-
-__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
-__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
-
 /*
  * Determine the size of a slab object
  */
@@ -130,74 +100,6 @@ unsigned int calculate_alignment(slab_flags_t flags,
 	return ALIGN(align, sizeof(void *));
 }
 
-/*
- * Find a mergeable slab cache
- */
-int slab_unmergeable(struct kmem_cache *s)
-{
-	if (slub_nomerge || (s->flags & SLAB_NEVER_MERGE))
-		return 1;
-
-	if (s->ctor)
-		return 1;
-
-#ifdef CONFIG_HARDENED_USERCOPY
-	if (s->usersize)
-		return 1;
-#endif
-
-	/*
-	 * We may have set a slab to be unmergeable during bootstrap.
-	 */
-	if (s->refcount < 0)
-		return 1;
-
-	return 0;
-}
-
-struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
-		slab_flags_t flags, const char *name, void (*ctor)(void *))
-{
-	struct kmem_cache *s;
-
-	if (slub_nomerge)
-		return NULL;
-
-	if (ctor)
-		return NULL;
-
-	size = ALIGN(size, sizeof(void *));
-	align = calculate_alignment(flags, align, size);
-	size = ALIGN(size, align);
-	flags = kmem_cache_flags(size, flags, name);
-
-	if (flags & SLAB_NEVER_MERGE)
-		return NULL;
-
-	list_for_each_entry_reverse(s, &slab_caches, list) {
-		if (slab_unmergeable(s))
-			continue;
-
-		if (size > s->size)
-			continue;
-
-		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
-			continue;
-		/*
-		 * Check if alignment is compatible.
-		 * Courtesy of Adrian Drzewiecki
-		 */
-		if ((s->size & ~(align - 1)) != s->size)
-			continue;
-
-		if (s->size - size >= sizeof(void *))
-			continue;
-
-		return s;
-	}
-	return NULL;
-}
-
 static struct kmem_cache *create_cache(const char *name,
 		unsigned int object_size, unsigned int align,
 		slab_flags_t flags, unsigned int useroffset,
diff --git a/mm/slub.c b/mm/slub.c
index ae1e6e635253..435d9ed140e4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -709,6 +709,104 @@ static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
 	return false;
 }
 
+/*
+ * Set of flags that will prevent slab merging
+ */
+#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
+		SLAB_FAILSLAB | SLAB_NO_MERGE | kasan_never_merge())
+
+#define SLUB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
+			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
+
+/*
+ * Merge control. If this is set then no merging of slab caches will occur.
+ */
+static bool slub_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
+
+static int __init setup_slub_nomerge(char *str)
+{
+	slub_nomerge = true;
+	return 1;
+}
+
+static int __init setup_slub_merge(char *str)
+{
+	slub_nomerge = false;
+	return 1;
+}
+
+__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
+__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
+
+/*
+ * Find a mergeable slab cache
+ */
+static inline int slub_unmergeable(struct kmem_cache *s)
+{
+	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
+		return 1;
+
+	if (s->ctor)
+		return 1;
+
+#ifdef CONFIG_HARDENED_USERCOPY
+	if (s->usersize)
+		return 1;
+#endif
+
+	/*
+	 * We may have set a slab to be unmergeable during bootstrap.
+	 */
+	if (s->refcount < 0)
+		return 1;
+
+	return 0;
+}
+
+static struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
+		slab_flags_t flags, const char *name, void (*ctor)(void *))
+{
+	struct kmem_cache *s;
+
+	if (slub_nomerge)
+		return NULL;
+
+	if (ctor)
+		return NULL;
+
+	size = ALIGN(size, sizeof(void *));
+	align = calculate_alignment(flags, align, size);
+	size = ALIGN(size, align);
+	flags = kmem_cache_flags(size, flags, name);
+
+	if (flags & SLUB_NEVER_MERGE)
+		return NULL;
+
+	list_for_each_entry_reverse(s, &slab_caches, list) {
+		if (slub_unmergeable(s))
+			continue;
+
+		if (size > s->size)
+			continue;
+
+		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
+			continue;
+		/*
+		 * Check if alignment is compatible.
+		 * Courtesy of Adrian Drzewiecki
+		 */
+		if ((s->size & ~(align - 1)) != s->size)
+			continue;
+
+		if (s->size - size >= sizeof(void *))
+			continue;
+
+		return s;
+	}
+	return NULL;
+}
+
 #ifdef CONFIG_SLUB_DEBUG
 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
 static DEFINE_SPINLOCK(object_map_lock);
@@ -6679,7 +6777,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	int err;
 	const char *name;
 	struct kset *kset = cache_kset(s);
-	int unmergeable = slab_unmergeable(s);
+	int unmergeable = slub_unmergeable(s);
 
 	if (!unmergeable && disable_higher_order_debug &&
 	    (slub_debug & DEBUG_METADATA_FLAGS))
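To make the merge criteria in find_mergeable() concrete, here is a small
standalone userspace sketch (simplified types; MERGE_SAME is a
placeholder standing in for the kernel's SLUB_MERGE_SAME mask) of the
per-candidate checks the loop applies:

	#include <stdio.h>
	#include <stdbool.h>

	/* Simplified stand-in for struct kmem_cache. */
	struct cache {
		unsigned int size;	/* object size including metadata */
		unsigned long flags;
	};

	#define MERGE_SAME 0x3UL	/* placeholder for SLUB_MERGE_SAME bits */

	/*
	 * Mirrors the per-candidate tests in find_mergeable(): the existing
	 * cache must be large enough, agree on the MERGE_SAME flag bits, be
	 * compatible with the requested alignment, and waste less than one
	 * pointer's worth of space.
	 */
	static bool can_merge(const struct cache *s, unsigned int size,
			      unsigned int align, unsigned long flags)
	{
		if (size > s->size)
			return false;
		if ((flags & MERGE_SAME) != (s->flags & MERGE_SAME))
			return false;
		/* s->size must be a multiple of the requested alignment. */
		if ((s->size & ~(align - 1)) != s->size)
			return false;
		/* Reject if merging wastes sizeof(void *) bytes or more. */
		if (s->size - size >= sizeof(void *))
			return false;
		return true;
	}

	int main(void)
	{
		struct cache s = { .size = 64, .flags = 0 };

		/* 1: 60 fits in 64, only 4 bytes wasted (64-bit host) */
		printf("%d\n", can_merge(&s, 60, 8, 0));
		/* 0: 24 bytes of waste >= sizeof(void *) */
		printf("%d\n", can_merge(&s, 40, 8, 0));
		/* 0: a 64-byte cache cannot honour 128-byte alignment */
		printf("%d\n", can_merge(&s, 60, 128, 0));
		return 0;
	}

In the kernel, find_mergeable() additionally normalizes size and align
via calculate_alignment() before these tests, and walks slab_caches in
reverse, returning the first compatible cache it finds.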