
[RFC,30/32] mm/sl*b: Differentiate struct slab fields by sl*b implementations

Message ID: 20211116001628.24216-31-vbabka@suse.cz (mailing list archive)
State: New
Series: Separate struct slab from struct page

Commit Message

Vlastimil Babka Nov. 16, 2021, 12:16 a.m. UTC
With a struct slab definition separate from struct page, we can go further and
define only the fields that the chosen sl*b implementation uses. This means
everything between the __page_flags and __page_refcount placeholders now
depends on the chosen CONFIG_SL*B. Some fields exist in all implementations
(slab_list), but can be part of a union in some, so it's simpler to repeat
them than to complicate the definition with even more ifdefs.

The patch doesn't change the physical offsets of the fields, although that
could be done later - for example, it's now clear that tighter packing in SLOB
would be possible.
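
For illustration only - not part of the patch - a minimal, self-contained
userspace sketch of how such layout invariants can be pinned at compile time
with offsetof() and static_assert(), in the spirit of the SLAB_MATCH() checks
further down in mm/slab.h; the struct and field names below are made up:

#include <assert.h>
#include <stddef.h>

/* Stand-ins for "struct page" and "struct slab"; purely hypothetical. */
struct old_layout {
	unsigned long flags;
	void *cache;
};

struct new_layout {
	unsigned long __flags;
	void *slab_cache;
};

/* The build fails if a field moves, so any repacking is an explicit decision. */
static_assert(offsetof(struct old_layout, flags) ==
	      offsetof(struct new_layout, __flags), "flags moved");
static_assert(offsetof(struct old_layout, cache) ==
	      offsetof(struct new_layout, slab_cache), "cache moved");

int main(void) { return 0; }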

This should also prevent accidental use of fields that don't exist in a given
implementation. Before this patch, virt_to_cache() and cache_from_obj() were
visible for SLOB (albeit not used), although they rely on the slab_cache field
that isn't set by SLOB. With this patch that would be a compile error, so
these functions are now hidden behind #ifndef CONFIG_SLOB.
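
Purely as an illustration of the pattern (hypothetical names, not kernel code):
when a helper is compiled out together with the field it touches, any stray
caller becomes a build error instead of a silent use of a field that is never
set. CONFIG_MINIMAL below plays the role of CONFIG_SLOB:

#include <stdio.h>

struct slab_like {
	unsigned long flags;
#ifndef CONFIG_MINIMAL
	const char *cache_name;	/* field the minimal config never maintains */
#endif
};

#ifndef CONFIG_MINIMAL
/* Relies on the conditional field, so it only exists when the field does. */
static const char *cache_of(const struct slab_like *s)
{
	return s->cache_name;
}
#endif

int main(void)
{
#ifndef CONFIG_MINIMAL
	struct slab_like s = { .flags = 0, .cache_name = "demo" };
	printf("%s\n", cache_of(&s));
#endif
	/* Building with -DCONFIG_MINIMAL turns any leftover cache_of() call into an error. */
	return 0;
}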

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexander Potapenko <glider@google.com> (maintainer:KFENCE)
Cc: Marco Elver <elver@google.com> (maintainer:KFENCE)
Cc: Dmitry Vyukov <dvyukov@google.com> (reviewer:KFENCE)
Cc: <kasan-dev@googlegroups.com>
---
 mm/kfence/core.c |  9 +++++----
 mm/slab.h        | 46 ++++++++++++++++++++++++++++++++++++----------
 2 files changed, 41 insertions(+), 14 deletions(-)

Comments

Marco Elver Nov. 17, 2021, 7 a.m. UTC | #1
On Tue, 16 Nov 2021 at 01:16, Vlastimil Babka <vbabka@suse.cz> wrote:
> With a struct slab definition separate from struct page, we can go further and
> define only the fields that the chosen sl*b implementation uses. This means
> everything between the __page_flags and __page_refcount placeholders now
> depends on the chosen CONFIG_SL*B. Some fields exist in all implementations
> (slab_list), but can be part of a union in some, so it's simpler to repeat
> them than to complicate the definition with even more ifdefs.
>
> The patch doesn't change the physical offsets of the fields, although that
> could be done later - for example, it's now clear that tighter packing in SLOB
> would be possible.
>
> This should also prevent accidental use of fields that don't exist in a given
> implementation. Before this patch, virt_to_cache() and cache_from_obj() were
> visible for SLOB (albeit not used), although they rely on the slab_cache field
> that isn't set by SLOB. With this patch that would be a compile error, so
> these functions are now hidden behind #ifndef CONFIG_SLOB.
>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> Cc: Alexander Potapenko <glider@google.com> (maintainer:KFENCE)
> Cc: Marco Elver <elver@google.com> (maintainer:KFENCE)
> Cc: Dmitry Vyukov <dvyukov@google.com> (reviewer:KFENCE)
> Cc: <kasan-dev@googlegroups.com>

Ran kfence_test with both slab and slub, and all tests pass:

Tested-by: Marco Elver <elver@google.com>

> ---
>  mm/kfence/core.c |  9 +++++----
>  mm/slab.h        | 46 ++++++++++++++++++++++++++++++++++++----------
>  2 files changed, 41 insertions(+), 14 deletions(-)
>
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 4eb60cf5ff8b..46103a7628a6 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -427,10 +427,11 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
>         /* Set required slab fields. */
>         slab = virt_to_slab((void *)meta->addr);
>         slab->slab_cache = cache;
> -       if (IS_ENABLED(CONFIG_SLUB))
> -               slab->objects = 1;
> -       if (IS_ENABLED(CONFIG_SLAB))
> -               slab->s_mem = addr;
> +#if defined(CONFIG_SLUB)
> +       slab->objects = 1;
> +#elif defined (CONFIG_SLAB)
> +       slab->s_mem = addr;
> +#endif
>
>         /* Memory initialization. */
>         for_each_canary(meta, set_canary_byte);
> diff --git a/mm/slab.h b/mm/slab.h
> index 58b65e5e5d49..10a9ee195249 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -8,9 +8,24 @@
>  /* Reuses the bits in struct page */
>  struct slab {
>         unsigned long __page_flags;
> +
> +#if defined(CONFIG_SLAB)
> +
> +       union {
> +               struct list_head slab_list;
> +               struct rcu_head rcu_head;
> +       };
> +       struct kmem_cache *slab_cache;
> +       void *freelist; /* array of free object indexes */
> +       void * s_mem;   /* first object */
> +       unsigned int active;
> +
> +#elif defined(CONFIG_SLUB)
> +
>         union {
>                 struct list_head slab_list;
> -               struct {        /* Partial pages */
> +               struct rcu_head rcu_head;
> +               struct {
>                         struct slab *next;
>  #ifdef CONFIG_64BIT
>                         int slabs;      /* Nr of slabs left */
> @@ -18,25 +33,32 @@ struct slab {
>                         short int slabs;
>  #endif
>                 };
> -               struct rcu_head rcu_head;
>         };
> -       struct kmem_cache *slab_cache; /* not slob */
> +       struct kmem_cache *slab_cache;
>         /* Double-word boundary */
>         void *freelist;         /* first free object */
>         union {
> -               void *s_mem;    /* slab: first object */
> -               unsigned long counters;         /* SLUB */
> -               struct {                        /* SLUB */
> +               unsigned long counters;
> +               struct {
>                         unsigned inuse:16;
>                         unsigned objects:15;
>                         unsigned frozen:1;
>                 };
>         };
> +       unsigned int __unused;
> +
> +#elif defined(CONFIG_SLOB)
> +
> +       struct list_head slab_list;
> +       void * __unused_1;
> +       void *freelist;         /* first free block */
> +       void * __unused_2;
> +       int units;
> +
> +#else
> +#error "Unexpected slab allocator configured"
> +#endif
>
> -       union {
> -               unsigned int active;            /* SLAB */
> -               int units;                      /* SLOB */
> -       };
>         atomic_t __page_refcount;
>  #ifdef CONFIG_MEMCG
>         unsigned long memcg_data;
> @@ -47,7 +69,9 @@ struct slab {
>         static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
>  SLAB_MATCH(flags, __page_flags);
>  SLAB_MATCH(compound_head, slab_list);  /* Ensure bit 0 is clear */
> +#ifndef CONFIG_SLOB
>  SLAB_MATCH(rcu_head, rcu_head);
> +#endif
>  SLAB_MATCH(_refcount, __page_refcount);
>  #ifdef CONFIG_MEMCG
>  SLAB_MATCH(memcg_data, memcg_data);
> @@ -623,6 +647,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s,
>  }
>  #endif /* CONFIG_MEMCG_KMEM */
>
> +#ifndef CONFIG_SLOB
>  static inline struct kmem_cache *virt_to_cache(const void *obj)
>  {
>         struct slab *slab;
> @@ -669,6 +694,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
>                 print_tracking(cachep, x);
>         return cachep;
>  }
> +#endif /* CONFIG_SLOB */
>
>  static inline size_t slab_ksize(const struct kmem_cache *s)
>  {
> --
> 2.33.1
>

Patch

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 4eb60cf5ff8b..46103a7628a6 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -427,10 +427,11 @@  static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	/* Set required slab fields. */
 	slab = virt_to_slab((void *)meta->addr);
 	slab->slab_cache = cache;
-	if (IS_ENABLED(CONFIG_SLUB))
-		slab->objects = 1;
-	if (IS_ENABLED(CONFIG_SLAB))
-		slab->s_mem = addr;
+#if defined(CONFIG_SLUB)
+	slab->objects = 1;
+#elif defined (CONFIG_SLAB)
+	slab->s_mem = addr;
+#endif
 
 	/* Memory initialization. */
 	for_each_canary(meta, set_canary_byte);
diff --git a/mm/slab.h b/mm/slab.h
index 58b65e5e5d49..10a9ee195249 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -8,9 +8,24 @@ 
 /* Reuses the bits in struct page */
 struct slab {
 	unsigned long __page_flags;
+
+#if defined(CONFIG_SLAB)
+
+	union {
+		struct list_head slab_list;
+		struct rcu_head rcu_head;
+	};
+	struct kmem_cache *slab_cache;
+	void *freelist;	/* array of free object indexes */
+	void * s_mem;	/* first object */
+	unsigned int active;
+
+#elif defined(CONFIG_SLUB)
+
 	union {
 		struct list_head slab_list;
-		struct {	/* Partial pages */
+		struct rcu_head rcu_head;
+		struct {
 			struct slab *next;
 #ifdef CONFIG_64BIT
 			int slabs;	/* Nr of slabs left */
@@ -18,25 +33,32 @@  struct slab {
 			short int slabs;
 #endif
 		};
-		struct rcu_head rcu_head;
 	};
-	struct kmem_cache *slab_cache; /* not slob */
+	struct kmem_cache *slab_cache;
 	/* Double-word boundary */
 	void *freelist;		/* first free object */
 	union {
-		void *s_mem;	/* slab: first object */
-		unsigned long counters;		/* SLUB */
-		struct {			/* SLUB */
+		unsigned long counters;
+		struct {
 			unsigned inuse:16;
 			unsigned objects:15;
 			unsigned frozen:1;
 		};
 	};
+	unsigned int __unused;
+
+#elif defined(CONFIG_SLOB)
+
+	struct list_head slab_list;
+	void * __unused_1;
+	void *freelist;		/* first free block */
+	void * __unused_2;
+	int units;
+
+#else
+#error "Unexpected slab allocator configured"
+#endif
 
-	union {
-		unsigned int active;		/* SLAB */
-		int units;			/* SLOB */
-	};
 	atomic_t __page_refcount;
 #ifdef CONFIG_MEMCG
 	unsigned long memcg_data;
@@ -47,7 +69,9 @@  struct slab {
 	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
 SLAB_MATCH(flags, __page_flags);
 SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
+#ifndef CONFIG_SLOB
 SLAB_MATCH(rcu_head, rcu_head);
+#endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
 SLAB_MATCH(memcg_data, memcg_data);
@@ -623,6 +647,7 @@  static inline void memcg_slab_free_hook(struct kmem_cache *s,
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+#ifndef CONFIG_SLOB
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct slab *slab;
@@ -669,6 +694,7 @@  static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 		print_tracking(cachep, x);
 	return cachep;
 }
+#endif /* CONFIG_SLOB */
 
 static inline size_t slab_ksize(const struct kmem_cache *s)
 {