diff --git a/include/linux/slab.h b/include/linux/slab.h
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -460,7 +460,6 @@ static __always_inline void kfree_bulk(size_t size, void **p)
kmem_cache_free_bulk(NULL, size, p);
}
-#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
__assume_slab_alignment __alloc_size(3);
@@ -468,39 +467,11 @@ extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t size) __assume_slab_alignment
__alloc_size(4);
-#else /* CONFIG_TRACING */
-static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
- gfp_t flags, size_t size)
-{
- void *ret = kmem_cache_alloc(s, flags);
-
- ret = kasan_kmalloc(s, ret, size, flags);
- return ret;
-}
-
-static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t size)
-{
- void *ret = kmem_cache_alloc_node(s, gfpflags, node);
-
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-#endif /* CONFIG_TRACING */
-
extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
__alloc_size(1);
-#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
__assume_page_alignment __alloc_size(1);
-#else
-static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
- unsigned int order)
-{
- return kmalloc_order(size, flags, order);
-}
-#endif
static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
{
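
[ Context, not part of the patch: the reason the deleted !CONFIG_TRACING
  branch had to mirror the extern declarations exactly is that the
  constant-size kmalloc() fast path in <linux/slab.h> calls
  kmem_cache_alloc_trace() directly.  A simplified sketch of that caller
  as it looked around this time (SLOB ifdefs and attribute annotations
  elided, so treat it as an approximation rather than the exact code):

	static __always_inline void *kmalloc(size_t size, gfp_t flags)
	{
		if (__builtin_constant_p(size)) {
			unsigned int index;

			/* too big for the kmalloc caches: use the page allocator */
			if (size > KMALLOC_MAX_CACHE_SIZE)
				return kmalloc_large(size, flags);

			index = kmalloc_index(size);
			if (!index)
				return ZERO_SIZE_PTR;

			/* now always the out-of-line tracing variant */
			return kmem_cache_alloc_trace(
					kmalloc_caches[kmalloc_type(flags)][index],
					flags, size);
		}
		return __kmalloc(size, flags);
	}

  With the inline fallbacks gone, this caller always reaches the
  out-of-line definitions in mm/slab.c and mm/slub.c below. ]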
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3519,7 +3519,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
-#ifdef CONFIG_TRACING
void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
@@ -3533,7 +3532,6 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
-#endif
/**
* kmem_cache_alloc_node - Allocate an object on the specified node
@@ -3560,7 +3558,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
-#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
gfp_t flags,
int nodeid,
@@ -3577,7 +3574,6 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
-#endif
static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
diff --git a/mm/slab_common.c b/mm/slab_common.c
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -954,7 +954,6 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
}
EXPORT_SYMBOL(kmalloc_order);
-#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
void *ret = kmalloc_order(size, flags, order);
@@ -962,7 +961,6 @@ void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
-#endif
#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3238,7 +3238,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size);
}
-#ifdef CONFIG_TRACING
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
void *ret = slab_alloc(s, gfpflags, _RET_IP_, size);
@@ -3247,7 +3246,6 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
-#endif
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
@@ -3260,7 +3258,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
-#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
int node, size_t size)
@@ -3274,7 +3271,6 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
-#endif
/*
* Slow path handling. This may still be called frequently since objects
The CONFIG_TRACING ifdefs are not necessary because tracepoints compile to
no-ops on kernels built without CONFIG_TRACING, so the untraced inline
fallbacks were redundant. These *_trace() functions will be removed
entirely in a later cleanup.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 include/linux/slab.h | 29 -----------------------------
 mm/slab.c            |  4 ----
 mm/slab_common.c     |  2 --
 mm/slub.c            |  4 ----
 4 files changed, 39 deletions(-)
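
[ Why dropping the ifdefs is safe: every trace_<name>() call site is
  generated by macros in include/linux/tracepoint.h that collapse to an
  empty inline when tracepoints are compiled out (CONFIG_TRACING selects
  CONFIG_TRACEPOINTS).  A heavily simplified sketch of that pattern -- the
  real macros also carry static keys, RCU handling and probe iteration,
  so this is an illustration, not the actual code:

	#ifdef CONFIG_TRACEPOINTS
	#define __DECLARE_TRACE(name, proto, args)			\
		static inline void trace_##name(proto)			\
		{							\
			/* jump-label test, then call registered probes */ \
			if (static_key_false(&__tracepoint_##name.key))	\
				__DO_TRACE(name, args);			\
		}
	#else
	#define __DECLARE_TRACE(name, proto, args)			\
		static inline void trace_##name(proto)			\
		{ }	/* compiles away entirely */
	#endif

  So the trace_kmalloc()/trace_kmem_cache_alloc() calls inside the
  *_trace() functions cost nothing on !CONFIG_TRACING kernels, and the
  deleted inline fallbacks were behaviorally identical to the traced
  versions they duplicated. ]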