@@ -232,7 +232,12 @@ struct obj_cgroup {
* if MEMCG_DATA_OBJEXTS is set.
*/
struct slabobj_ext {
+#ifdef CONFIG_MEMCG_KMEM
struct obj_cgroup *objcg;
+#endif
+#ifdef CONFIG_SLAB_ALLOC_TAGGING
+ union codetag_ref ref;
+#endif
} __aligned(8);

/*
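
The record is at most two pointer-sized fields, so the per-slab extension vector stays cheap. A rough size sketch (illustrative only, not part of the patch; it assumes union codetag_ref wraps a single struct codetag pointer, as in the code tagging framework this series builds on):

#include <linux/build_bug.h>

/*
 * Illustrative compile-time check: with both options enabled the record is
 * two pointers wide (16 bytes on 64-bit, 8 bytes on 32-bit), so a slab with
 * N objects pays roughly N * sizeof(struct slabobj_ext) for its vector.
 */
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLAB_ALLOC_TAGGING)
static_assert(sizeof(struct slabobj_ext) == 2 * sizeof(void *));
#endif
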
@@ -438,6 +438,31 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */

+#ifdef CONFIG_SLAB_ALLOC_TAGGING
+
+#include <linux/alloc_tag.h>
+
+union codetag_ref *get_slab_tag_ref(const void *objp);
+
+#define slab_tag_add(_old, _new) \
+do { \
+ if (!ZERO_OR_NULL_PTR(_new) && (_old) != (_new)) \
+ alloc_tag_add(get_slab_tag_ref(_new), __ksize(_new)); \
+} while (0)
+
+static inline void slab_tag_dec(const void *ptr)
+{
+ if (!ZERO_OR_NULL_PTR(ptr))
+ alloc_tag_sub(get_slab_tag_ref(ptr), __ksize(ptr));
+}
+
+#else
+
+#define slab_tag_add(_old, _new) do {} while (0)
+static inline void slab_tag_dec(const void *ptr) {}
+
+#endif
+
void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
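
These helpers are presumably wired into the allocation and free paths elsewhere in the series. The sketch below only illustrates the intended pattern, with hypothetical wrapper names that are not part of the patch: slab_tag_add() takes an old/new pair so a krealloc()-style caller records a tag only when a new object was actually produced, while slab_tag_dec() runs on the free side while the object (and its extension vector) is still valid.

#include <linux/slab.h>

/* Hypothetical wrappers, for illustration only. */
static inline void *tagged_krealloc(const void *old, size_t size, gfp_t flags)
{
	void *new = krealloc(old, size, flags);

	/* No-op when new is NULL/ZERO_SIZE_PTR or the object was reused in place. */
	slab_tag_add(old, new);
	return new;
}

static inline void tagged_kfree(const void *ptr)
{
	/* Drop the stats before the object goes back to the cache. */
	slab_tag_dec(ptr);
	kfree(ptr);
}
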
@@ -106,7 +106,7 @@ static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *sla
* reciprocal_divide(offset, cache->reciprocal_buffer_size)
*/
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
+ const struct slab *slab, const void *obj)
{
u32 offset = (obj - slab->s_mem);
return reciprocal_divide(offset, cache->reciprocal_buffer_size);
@@ -170,14 +170,14 @@ static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *sla

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
- void *addr, void *obj)
+ void *addr, const void *obj)
{
return reciprocal_divide(kasan_reset_tag(obj) - addr,
cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
+ const struct slab *slab, const void *obj)
{
if (is_kfence_address(obj))
return 0;
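
The const-ification above lets a purely read-only lookup hand a const object pointer straight to obj_to_index(), which is exactly what get_slab_tag_ref() further down needs for its const void * argument. A minimal illustration (hypothetical helper, not part of the patch):

/* Hypothetical read-only lookup, for illustration only. */
static unsigned int const_obj_index(const struct kmem_cache *cache,
				    const struct slab *slab, const void *obj)
{
	/* Compiles without a cast only once obj_to_index() accepts const void *. */
	return obj_to_index(cache, slab, obj);
}
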
@@ -989,6 +989,17 @@ config PAGE_ALLOC_TAGGING
initiated at that code location. The mechanism can be used to track
memory leaks with a low performance impact.

+config SLAB_ALLOC_TAGGING
+ bool "Enable slab allocation tagging"
+ default n
+ select ALLOC_TAGGING
+ select SLAB_OBJ_EXT
+ help
+ Instrument slab allocators to track allocation source code and
+ collect statistics on the number of allocations and their total size
+ initiated at that code location. The mechanism can be used to track
+ memory leaks with a low performance impact.
+
source "lib/Kconfig.kasan"
source "lib/Kconfig.kfence"
@@ -202,6 +202,39 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
return NULL;
}

+#ifdef CONFIG_SLAB_ALLOC_TAGGING
+
+union codetag_ref *get_slab_tag_ref(const void *objp)
+{
+ struct slabobj_ext *obj_exts;
+ union codetag_ref *res = NULL;
+ struct slab *slab;
+ unsigned int off;
+
+ slab = virt_to_slab(objp);
+ /*
+ * We could be given a kmalloc_large() object, skip those. They use
+ * alloc_pages and can be tracked by page allocation tracking.
+ */
+ if (!slab)
+ goto out;
+
+ obj_exts = slab_obj_exts(slab);
+ if (!obj_exts)
+ goto out;
+
+ if (!slab->slab_cache)
+ goto out;
+
+ off = obj_to_index(slab->slab_cache, slab, objp);
+ res = &obj_exts[off].ref;
+out:
+ return res;
+}
+EXPORT_SYMBOL(get_slab_tag_ref);
+
+#endif /* CONFIG_SLAB_ALLOC_TAGGING */
+
static struct kmem_cache *create_cache(const char *name,
unsigned int object_size, unsigned int align,
slab_flags_t flags, unsigned int useroffset,
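
get_slab_tag_ref() is exported, so code outside the core allocator can resolve a live object to its codetag slot. A hypothetical consumer might look like the sketch below (illustrative only; it assumes, as in the code tagging framework this series builds on, that union codetag_ref wraps a struct codetag pointer carrying filename/lineno fields):

#include <linux/alloc_tag.h>
#include <linux/printk.h>

/* Hypothetical debugging helper, not part of the patch. */
static void report_alloc_site(const void *objp)
{
	union codetag_ref *ref = get_slab_tag_ref(objp);

	/* NULL means untracked: kmalloc_large(), no extension vector, etc. */
	if (!ref || !ref->ct)
		return;

	/* Field names assume the codetag layout from the code tagging framework. */
	pr_debug("%p allocated at %s:%u\n", objp, ref->ct->filename, ref->ct->lineno);
}
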