@@ -1885,9 +1885,8 @@ config SLAB_MERGE_DEFAULT
command line.
config SLAB_FREELIST_RANDOM
- default n
+ bool "Randomize slab freelist"
depends on SLAB || SLUB
- bool "SLAB freelist randomization"
help
Randomizes the freelist order used on creating new pages. This
security feature reduces the predictability of the kernel slab
@@ -1895,12 +1894,13 @@ config SLAB_FREELIST_RANDOM
config SLAB_FREELIST_HARDENED
bool "Harden slab freelist metadata"
- depends on SLUB
help
Many kernel heap attacks try to target slab cache metadata and
other infrastructure. This options makes minor performance
sacrifices to harden the kernel slab allocator against common
- freelist exploit methods.
+ freelist exploit methods. Some slab implementations have more
+ sanity-checking than others. This option is most effective with
+ CONFIG_SLUB.
config SHUFFLE_PAGE_ALLOCATOR
bool "Page allocator randomization"
@@ -3672,14 +3672,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
}
EXPORT_SYMBOL(__kmalloc_track_caller);
-static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
-{
- if (memcg_kmem_enabled())
- return virt_to_cache(x);
- else
- return s;
-}
-
/**
* kmem_cache_free - Deallocate an object
* @cachep: The cache the allocation was from.
@@ -467,6 +467,20 @@ static inline void memcg_link_cache(struct kmem_cache *s,
#endif /* CONFIG_MEMCG_KMEM */
+#ifdef CONFIG_SLUB_DEBUG
+extern int kmem_cache_debug_flags(struct kmem_cache *s,
+				  slab_flags_t flags);
+extern void print_tracking(struct kmem_cache *s, void *object);
+#else
+static inline int kmem_cache_debug_flags(struct kmem_cache *s,
+ slab_flags_t flags)
+{
+ return 0;
+}
+static inline void print_tracking(struct kmem_cache *s, void *object)
+{ }
+#endif
+
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
struct page *page;
@@ -503,6 +517,23 @@ static __always_inline void uncharge_slab_page(struct page *page, int order,
memcg_uncharge_slab(page, order, s);
}
+static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+{
+ struct kmem_cache *cachep;
+
+ if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
+ !memcg_kmem_enabled() &&
+ !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
+ return s;
+
+ cachep = virt_to_cache(x);
+ if (WARN(cachep && !slab_equal_or_root(cachep, s),
+ "%s: Wrong slab cache. %s but object is from %s\n",
+ __func__, s->name, cachep->name))
+ print_tracking(cachep, x);
+ return cachep;
+}
+
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
@@ -120,7 +120,6 @@ DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
-#endif
/*
* Returns true if any of the specified slub_debug flags is enabled for the
@@ -129,12 +128,11 @@ DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
*/
static inline int kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
-#ifdef CONFIG_SLUB_DEBUG
if (static_branch_unlikely(&slub_debug_enabled))
return s->flags & flags;
-#endif
return 0;
}
+#endif
static inline int kmem_cache_debug(struct kmem_cache *s)
{
@@ -1524,10 +1522,6 @@ static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
{
return false;
}
-
-static void print_tracking(struct kmem_cache *s, void *object)
-{
-}
#endif /* CONFIG_SLUB_DEBUG */
/*
@@ -3179,23 +3173,6 @@ void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
}
#endif
-static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
-{
- struct kmem_cache *cachep;
-
- if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
- !memcg_kmem_enabled() &&
- !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
- return s;
-
- cachep = virt_to_cache(x);
- if (WARN(cachep && !slab_equal_or_root(cachep, s),
- "%s: Wrong slab cache. %s but object is from %s\n",
- __func__, s->name, cachep->name))
- print_tracking(cachep, x);
- return cachep;
-}
-
void kmem_cache_free(struct kmem_cache *s, void *x)
{
s = cache_from_obj(s, x);
Include SLAB and SLOB caches when performing kmem_cache pointer
verification. A defense against such corruption[1] should be applied
to all the allocators. With this added, the "SLAB_FREE_CROSS" and
"SLAB_FREE_PAGE" LKDTM tests now pass on SLAB:

  lkdtm: Performing direct entry SLAB_FREE_CROSS
  lkdtm: Attempting cross-cache slab free ...
  ------------[ cut here ]------------
  cache_from_obj: Wrong slab cache. lkdtm-heap-b but object is from lkdtm-heap-a
  WARNING: CPU: 2 PID: 2195 at mm/slab.h:530 kmem_cache_free+0x8d/0x1d0
  ...
  lkdtm: Performing direct entry SLAB_FREE_PAGE
  lkdtm: Attempting non-Slab slab free ...
  ------------[ cut here ]------------
  virt_to_cache: Object is not a Slab page!
  WARNING: CPU: 1 PID: 2202 at mm/slab.h:489 kmem_cache_free+0x196/0x1d0

Additionally, clean up the neighboring Kconfig entries for clarity and
readability, and remove a redundant option.

[1] https://github.com/ThomasKing2014/slides/raw/master/Building%20universal%20Android%20rooting%20with%20a%20type%20confusion%20vulnerability.pdf

Fixes: 598a0717a816 ("mm/slab: validate cache membership under freelist hardening")
Signed-off-by: Kees Cook <keescook@chromium.org>
---
 init/Kconfig |  8 ++++----
 mm/slab.c    |  8 --------
 mm/slab.h    | 31 +++++++++++++++++++++++++++++++
 mm/slub.c    | 25 +------------------------
 4 files changed, 36 insertions(+), 36 deletions(-)
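
Note (not part of the patch): the type confusion that cache_from_obj()
now catches on all allocators can be exercised outside of LKDTM with a
trivial test module. The sketch below is illustrative only; the module
and cache names, object size, and error handling are made up for the
example, and lkdtm_SLAB_FREE_CROSS() remains the canonical in-tree
test. It allocates from one cache and frees the object into another,
which the moved cache_from_obj() check reports when
CONFIG_SLAB_FREELIST_HARDENED (or consistency checking) is enabled.

/*
 * Illustrative sketch only: a minimal module performing the same
 * cross-cache free as lkdtm_SLAB_FREE_CROSS(). Cache names, object
 * size, and module boilerplate are arbitrary. Boot with
 * "slab_nomerge" so the two caches are not merged into one.
 */
#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *demo_cache_a, *demo_cache_b;

static int __init cross_free_demo_init(void)
{
	void *obj;

	demo_cache_a = kmem_cache_create("demo-heap-a", 64, 0, 0, NULL);
	demo_cache_b = kmem_cache_create("demo-heap-b", 64, 0, 0, NULL);
	if (!demo_cache_a || !demo_cache_b) {
		kmem_cache_destroy(demo_cache_a);
		kmem_cache_destroy(demo_cache_b);
		return -ENOMEM;
	}

	obj = kmem_cache_alloc(demo_cache_a, GFP_KERNEL);
	if (obj)
		/*
		 * Free an object from cache A through cache B: the type
		 * confusion primitive. With this patch applied (and
		 * hardening or consistency checks enabled), cache_from_obj()
		 * WARNs "Wrong slab cache" and the free is redirected to
		 * the object's real cache.
		 */
		kmem_cache_free(demo_cache_b, obj);

	return 0;
}

static void __exit cross_free_demo_exit(void)
{
	kmem_cache_destroy(demo_cache_a);
	kmem_cache_destroy(demo_cache_b);
}

module_init(cross_free_demo_init);
module_exit(cross_free_demo_exit);
MODULE_LICENSE("GPL");

With the patch applied, the bogus free produces the "Wrong slab cache"
WARN shown above and the object is returned to demo-heap-a rather than
corrupting demo-heap-b's freelist.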