@@ -21,6 +21,8 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
+#include <linux/kmsan.h>
+#include <linux/kmsan-checks.h> /* KMSAN_INIT_VALUE */
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
@@ -283,17 +285,27 @@ static void prefetch_freepointer(const struct kmem_cache *s, void *object)
prefetch(object + s->offset);
}
+/*
+ * When running under KMSAN, get_freepointer_safe() may return an uninitialized
+ * pointer value in the case the current thread loses the race for the next
+ * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
+ * slab_alloc_node() will fail, so the uninitialized value won't be used, but
+ * KMSAN will still check all arguments of cmpxchg because of imperfect
+ * handling of inline assembly.
+ * To work around this problem, use KMSAN_INIT_VALUE() to force initialize the
+ * return value of get_freepointer_safe().
+ */
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
unsigned long freepointer_addr;
void *p;
if (!debug_pagealloc_enabled_static())
- return get_freepointer(s, object);
+ return KMSAN_INIT_VALUE(get_freepointer(s, object));
freepointer_addr = (unsigned long)object + s->offset;
probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
- return freelist_ptr(s, p, freepointer_addr);
+ return KMSAN_INIT_VALUE(freelist_ptr(s, p, freepointer_addr));
}
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
@@ -1411,6 +1423,7 @@ static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
ptr = kasan_kmalloc_large(ptr, size, flags);
/* As ptr might get tagged, call kmemleak hook after KASAN. */
kmemleak_alloc(ptr, size, 1, flags);
+ kmsan_kmalloc_large(ptr, size, flags);
return ptr;
}
@@ -1418,6 +1431,7 @@ static __always_inline void kfree_hook(void *x)
{
kmemleak_free(x);
kasan_kfree_large(x, _RET_IP_);
+ kmsan_kfree_large(x);
}
static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
@@ -1461,6 +1475,7 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
do {
object = next;
next = get_freepointer(s, object);
+ kmsan_slab_free(s, object);
if (slab_want_init_on_free(s)) {
/*
@@ -2784,6 +2799,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
memset(object, 0, s->object_size);
+ kmsan_slab_alloc(s, object, gfpflags);
slab_post_alloc_hook(s, gfpflags, 1, &object);
return object;
@@ -3167,7 +3183,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
void **p)
{
struct kmem_cache_cpu *c;
- int i;
+ int i, j;
/* memcg and kmem_cache debug support */
s = slab_pre_alloc_hook(s, flags);
@@ -3217,11 +3233,11 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
/* Clear memory outside IRQ disabled fastpath loop */
if (unlikely(slab_want_init_on_alloc(flags, s))) {
- int j;
-
for (j = 0; j < i; j++)
memset(p[j], 0, s->object_size);
}
+ for (j = 0; j < i; j++)
+ kmsan_slab_alloc(s, p[j], flags);
/* memcg and kmem_cache debug support */
slab_post_alloc_hook(s, flags, size, p);
@@ -3829,6 +3845,7 @@ static int __init setup_slub_min_objects(char *str)
__setup("slub_min_objects=", setup_slub_min_objects);
+__no_sanitize_memory
void *__kmalloc(size_t size, gfp_t flags)
{
struct kmem_cache *s;
@@ -5725,6 +5742,7 @@ static char *create_unique_id(struct kmem_cache *s)
p += sprintf(p, "%07u", s->size);
BUG_ON(p > name + ID_STR_LENGTH - 1);
+ kmsan_unpoison_shadow(name, p - name);
return name;
}
@@ -5874,6 +5892,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
al->name = name;
al->next = alias_list;
alias_list = al;
+ kmsan_unpoison_shadow(al, sizeof(struct saved_alias));
return 0;
}
In order to report uninitialized memory coming from heap allocations, KMSAN
has to poison them unless they're created with __GFP_ZERO. Conveniently, the
KMSAN hooks are needed in the same places where init_on_alloc/init_on_free
initialization is performed.

Signed-off-by: Alexander Potapenko <glider@google.com>
To: Alexander Potapenko <glider@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: linux-mm@kvack.org

---
v3:
 - reverted unrelated whitespace changes

Change-Id: I51103b7981d3aabed747d0c85cbdc85568665871
---
 mm/slub.c | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)
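
A note on what the new allocation hooks conceptually do: on allocation, KMSAN
either poisons the new object's shadow (its contents are uninitialized) or
unpoisons it (the object is guaranteed to be zeroed). The sketch below is an
illustration only, not the actual kmsan_slab_alloc() implementation; the
kmsan_poison_shadow(addr, size, flags) helper is assumed here as the
counterpart of the kmsan_unpoison_shadow() call used in this patch:

/*
 * Illustration only, not the real kmsan_slab_alloc(). It mirrors the
 * poison/unpoison decision implied by the init_on_alloc logic above.
 */
static inline void kmsan_slab_alloc_sketch(struct kmem_cache *s, void *object,
					   gfp_t flags)
{
	if (unlikely(!object))
		return;
	if (flags & __GFP_ZERO)
		/* Zeroed objects start out fully initialized. */
		kmsan_unpoison_shadow(object, s->object_size);
	else
		/* Everything else is uninitialized until written to. */
		kmsan_poison_shadow(object, s->object_size, flags);
}

The real hook additionally records origin information for the poisoned
region, so that reports can point back at the allocation site.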
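
Likewise, the KMSAN_INIT_VALUE() workaround in get_freepointer_safe() boils
down to marking a value's shadow as initialized before handing it back. A
hypothetical sketch, not the real <linux/kmsan-checks.h> definition:

/*
 * Hypothetical force-initialize macro: copy the value into a local, mark
 * the local's shadow as initialized, and return it. It reuses the same
 * kmsan_unpoison_shadow() helper that this patch calls elsewhere.
 */
#define kmsan_init_value_sketch(val)			\
({							\
	typeof(val) __v = (val);			\
	kmsan_unpoison_shadow(&__v, sizeof(__v));	\
	__v;						\
})

With something like this, the racy read in get_freepointer_safe() can be
returned without KMSAN flagging the (discarded) value at the cmpxchg site.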