[v4,29/32] mm/slub: Define struct slab fields for CONFIG_SLUB_CPU_PARTIAL only when enabled

Message ID: 20220104001046.12263-30-vbabka@suse.cz (mailing list archive)
State: New
Series: Separate struct slab from struct page

Commit Message

Vlastimil Babka Jan. 4, 2022, 12:10 a.m. UTC
The fields 'next' and 'slabs' are only used when CONFIG_SLUB_CPU_PARTIAL
is enabled. We can put their definitions under #ifdef to prevent accidental
use when disabled.

Currently, show_slab_objects() and slabs_cpu_partial_show() contain code
that accesses the slabs field through the wrappers slub_percpu_partial()
and slub_percpu_partial_read_once(). With CONFIG_SLUB_CPU_PARTIAL=n this
code is effectively dead, but to prevent a compile error we need to hide
all of it behind #ifdef.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/slab.h | 2 ++
 mm/slub.c | 8 ++++++--
 2 files changed, 8 insertions(+), 2 deletions(-)
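
For reference, the wrappers named in the commit message behave roughly as
follows; this is a minimal sketch modeled on include/linux/slub_def.h around
this series, not verbatim kernel source:

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)			((c)->partial)
#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
/* Sketch: with CONFIG_SLUB_CPU_PARTIAL=n the wrappers fold to NULL, so
 * the 'if (slab)' branches above them are dead code, but a slab->slabs
 * access inside those branches still fails to compile once the field
 * itself is hidden under #ifdef. */
#define slub_percpu_partial(c)			NULL
#define slub_percpu_partial_read_once(c)	NULL
#endif

This is also why 'cpu' in slabs_cpu_partial_show() gains __maybe_unused:
with CONFIG_SLUB_CPU_PARTIAL=n both for_each_online_cpu() loops compile
away and the variable would otherwise trip -Wunused-variable.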

Comments

Roman Gushchin Jan. 6, 2022, 4:16 a.m. UTC | #1
On Tue, Jan 04, 2022 at 01:10:43AM +0100, Vlastimil Babka wrote:
> The fields 'next' and 'slabs' are only used when CONFIG_SLUB_CPU_PARTIAL
> is enabled. We can put their definitions under #ifdef to prevent accidental
> use when disabled.
> 
> Currently, show_slab_objects() and slabs_cpu_partial_show() contain code
> that accesses the slabs field through the wrappers slub_percpu_partial()
> and slub_percpu_partial_read_once(). With CONFIG_SLUB_CPU_PARTIAL=n this
> code is effectively dead, but to prevent a compile error we need to hide
> all of it behind #ifdef.
> 
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> ---
>  mm/slab.h | 2 ++
>  mm/slub.c | 8 ++++++--
>  2 files changed, 8 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/slab.h b/mm/slab.h
> index 362832f465e6..ac8be7dab535 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -25,10 +25,12 @@ struct slab {
>  	union {
>  		struct list_head slab_list;
>  		struct rcu_head rcu_head;
> +#ifdef CONFIG_SLUB_CPU_PARTIAL
>  		struct {
>  			struct slab *next;
>  			int slabs;	/* Nr of slabs left */
>  		};
> +#endif
>  	};
>  	struct kmem_cache *slab_cache;
>  	/* Double-word boundary */
> diff --git a/mm/slub.c b/mm/slub.c
> index d08ba1025aae..261474092e43 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -5258,6 +5258,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
>  			total += x;
>  			nodes[node] += x;
>  
> +#ifdef CONFIG_SLUB_CPU_PARTIAL
>  			slab = slub_percpu_partial_read_once(c);
>  			if (slab) {
>  				node = slab_nid(slab);
> @@ -5270,6 +5271,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
>  				total += x;
>  				nodes[node] += x;
>  			}
> +#endif
>  		}
>  	}
>  
> @@ -5469,9 +5471,10 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
>  {
>  	int objects = 0;
>  	int slabs = 0;
> -	int cpu;
> +	int cpu __maybe_unused;
>  	int len = 0;
>  
> +#ifdef CONFIG_SLUB_CPU_PARTIAL
>  	for_each_online_cpu(cpu) {
>  		struct slab *slab;
>  
> @@ -5480,12 +5483,13 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
>  		if (slab)
>  			slabs += slab->slabs;
>  	}
> +#endif
>  
>  	/* Approximate half-full slabs, see slub_set_cpu_partial() */
>  	objects = (slabs * oo_objects(s->oo)) / 2;
>  	len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
>  
> -#ifdef CONFIG_SMP
> +#if defined(CONFIG_SLUB_CPU_PARTIAL) && defined(CONFIG_SMP)

CONFIG_SLUB_CPU_PARTIAL is defined like:
config SLUB_CPU_PARTIAL
	default y
	depends on SLUB && SMP

So I guess the " && defined(CONFIG_SMP)" part can be dropped.
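
In other words, since SLUB_CPU_PARTIAL cannot be enabled without SMP, the
guard could presumably collapse to the config symbol alone (a sketch of the
suggested simplification, not part of the posted patch):

-#if defined(CONFIG_SLUB_CPU_PARTIAL) && defined(CONFIG_SMP)
+#ifdef CONFIG_SLUB_CPU_PARTIAL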

Otherwise lgtm.

Reviewed-by: Roman Gushchin <guro@fb.com>

Patch

diff --git a/mm/slab.h b/mm/slab.h
index 362832f465e6..ac8be7dab535 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -25,10 +25,12 @@ struct slab {
 	union {
 		struct list_head slab_list;
 		struct rcu_head rcu_head;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 		struct {
 			struct slab *next;
 			int slabs;	/* Nr of slabs left */
 		};
+#endif
 	};
 	struct kmem_cache *slab_cache;
 	/* Double-word boundary */
diff --git a/mm/slub.c b/mm/slub.c
index d08ba1025aae..261474092e43 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5258,6 +5258,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			total += x;
 			nodes[node] += x;
 
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 			slab = slub_percpu_partial_read_once(c);
 			if (slab) {
 				node = slab_nid(slab);
@@ -5270,6 +5271,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 				total += x;
 				nodes[node] += x;
 			}
+#endif
 		}
 	}
 
@@ -5469,9 +5471,10 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 {
 	int objects = 0;
 	int slabs = 0;
-	int cpu;
+	int cpu __maybe_unused;
 	int len = 0;
 
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	for_each_online_cpu(cpu) {
 		struct slab *slab;
 
@@ -5480,12 +5483,13 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 		if (slab)
 			slabs += slab->slabs;
 	}
+#endif
 
 	/* Approximate half-full slabs, see slub_set_cpu_partial() */
 	objects = (slabs * oo_objects(s->oo)) / 2;
 	len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SLUB_CPU_PARTIAL) && defined(CONFIG_SMP)
 	for_each_online_cpu(cpu) {
 		struct slab *slab;