Message ID | 20231120-slab-remove-slab-v2-15-9c9c70177183@suse.cz (mailing list archive)
---|---
State | Mainlined
Commit | b52ef56e9b324b172053b03d8c775ef4708fbc23
Series | remove the SLAB allocator
On Mon, Nov 20, 2023 at 07:34:26PM +0100, Vlastimil Babka wrote:
> The declaration and associated helpers are not used anywhere else
> anymore.
>
> Reviewed-by: Kees Cook <keescook@chromium.org>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
>  mm/slab.h | 29 -----------------------------
>  mm/slub.c | 27 +++++++++++++++++++++++++++
>  2 files changed, 27 insertions(+), 29 deletions(-)
>
> diff --git a/mm/slab.h b/mm/slab.h
> index a81ef7c9282d..5ae6a978e9c2 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -588,35 +588,6 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
>  	return s->size;
>  }
>
> -
> -/*
> - * The slab lists for all objects.
> - */
> -struct kmem_cache_node {
> -	spinlock_t list_lock;
> -	unsigned long nr_partial;
> -	struct list_head partial;
> -#ifdef CONFIG_SLUB_DEBUG
> -	atomic_long_t nr_slabs;
> -	atomic_long_t total_objects;
> -	struct list_head full;
> -#endif
> -};
> -
> -static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
> -{
> -	return s->node[node];
> -}
> -
> -/*
> - * Iterator over all nodes. The body will be executed for each node that has
> - * a kmem_cache_node structure allocated (which is true for all online nodes)
> - */
> -#define for_each_kmem_cache_node(__s, __node, __n) \
> -	for (__node = 0; __node < nr_node_ids; __node++) \
> -		if ((__n = get_node(__s, __node)))
> -
> -
>  #ifdef CONFIG_SLUB_DEBUG
>  void dump_unreclaimable_slab(void);
>  #else
> diff --git a/mm/slub.c b/mm/slub.c
> index 844e0beb84ee..cc801f8258fe 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -396,6 +396,33 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
>  #endif
>  }
>
> +/*
> + * The slab lists for all objects.
> + */
> +struct kmem_cache_node {
> +	spinlock_t list_lock;
> +	unsigned long nr_partial;
> +	struct list_head partial;
> +#ifdef CONFIG_SLUB_DEBUG
> +	atomic_long_t nr_slabs;
> +	atomic_long_t total_objects;
> +	struct list_head full;
> +#endif
> +};
> +
> +static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
> +{
> +	return s->node[node];
> +}
> +
> +/*
> + * Iterator over all nodes. The body will be executed for each node that has
> + * a kmem_cache_node structure allocated (which is true for all online nodes)
> + */
> +#define for_each_kmem_cache_node(__s, __node, __n) \
> +	for (__node = 0; __node < nr_node_ids; __node++) \
> +		if ((__n = get_node(__s, __node)))
> +
>  /*
>   * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
>   * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
>
> --

Looks good to me,
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

> 2.42.1
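For context on the iterator being moved: `for_each_kmem_cache_node()` is a for loop with an assignment-and-test folded into an `if`, so the caller's loop body only runs for nodes whose `kmem_cache_node` structure was actually allocated. Below is a minimal user-space sketch of that pattern; the names, the fixed node count, and the `struct cache` type are illustrative stand-ins, not the kernel's definitions.

```c
/*
 * Illustrative, user-space sketch of the for_each_kmem_cache_node()
 * pattern: a for loop whose body runs only when the per-node pointer
 * is non-NULL.  All names and the fixed node count are stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_NODE_IDS 4                     /* stand-in for nr_node_ids */

struct cache_node {
	unsigned long nr_partial;
};

struct cache {
	struct cache_node *node[NR_NODE_IDS];
};

/* Skip nodes for which no cache_node has been allocated. */
#define for_each_cache_node(__s, __node, __n)			\
	for (__node = 0; __node < NR_NODE_IDS; __node++)	\
		if ((__n = (__s)->node[__node]))

int main(void)
{
	struct cache s = { { NULL } };
	struct cache_node *n;
	int node;

	/* Only nodes 0 and 2 get a per-node structure. */
	s.node[0] = calloc(1, sizeof(*s.node[0]));
	s.node[2] = calloc(1, sizeof(*s.node[2]));
	s.node[0]->nr_partial = 3;
	s.node[2]->nr_partial = 7;

	/* Body runs twice: nodes 1 and 3 are silently skipped. */
	for_each_cache_node(&s, node, n)
		printf("node %d: %lu partial slabs\n", node, n->nr_partial);

	free(s.node[0]);
	free(s.node[2]);
	return 0;
}
```

The embedded `if` is what lets the real iterator skip nodes without an allocated structure without any explicit NULL check at the call sites; since SLAB's removal leaves SLUB as the only user of the structure, the helper, and the macro, keeping them private to mm/slub.c is the natural cleanup this patch performs.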