--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -460,9 +460,6 @@ static __always_inline void kfree_bulk(size_t size, void **p)
kmem_cache_free_bulk(NULL, size, p);
}
-extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
- __assume_slab_alignment __alloc_size(3);
-
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t size) __assume_slab_alignment
__alloc_size(4);
@@ -475,6 +472,36 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
return kmalloc_large_node(size, flags, NUMA_NO_NODE);
}
+#ifndef CONFIG_SLOB
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ if (__builtin_constant_p(size)) {
+ unsigned int index;
+
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large_node(size, flags, node);
+
+ index = kmalloc_index(size);
+
+ if (!index)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc_node_trace(
+ kmalloc_caches[kmalloc_type(flags)][index],
+ flags, node, size);
+ }
+ return __kmalloc_node(size, flags, node);
+}
+#else
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large_node(size, flags, node);
+
+ return __kmalloc_node(size, flags, node);
+}
+#endif
+
/**
* kmalloc - allocate memory
* @size: how many bytes of memory are required.
@@ -531,55 +558,8 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
*/
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
- if (__builtin_constant_p(size)) {
-#ifndef CONFIG_SLOB
- unsigned int index;
-#endif
- if (size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large(size, flags);
-#ifndef CONFIG_SLOB
- index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_alloc_trace(
- kmalloc_caches[kmalloc_type(flags)][index],
- flags, size);
-#endif
- }
- return __kmalloc(size, flags);
-}
-
-#ifndef CONFIG_SLOB
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- if (__builtin_constant_p(size)) {
- unsigned int index;
-
- if (size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large_node(size, flags, node);
-
- index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_alloc_node_trace(
- kmalloc_caches[kmalloc_type(flags)][index],
- flags, node, size);
- }
- return __kmalloc_node(size, flags, node);
-}
-#else
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large_node(size, flags, node);
-
- return __kmalloc_node(size, flags, node);
+ return kmalloc_node(size, flags, NUMA_NO_NODE);
}
-#endif
/**
* kmalloc_array - allocate memory for an array.
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3519,20 +3519,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
-void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
- void *ret;
-
- ret = slab_alloc(cachep, flags, size, _RET_IP_);
-
- ret = kasan_kmalloc(cachep, ret, size, flags);
- trace_kmalloc(_RET_IP_, ret,
- size, cachep->size, flags);
- return ret;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_trace);
-
/**
* kmem_cache_alloc_node - Allocate an object on the specified node
* @cachep: The cache to allocate from.
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3230,15 +3230,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size);
}
-void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
- void *ret = slab_alloc(s, gfpflags, _RET_IP_, size);
- trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_trace);
-
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size);
Now that kmalloc() and kmalloc_node() do the same job, make kmalloc() a wrapper of kmalloc_node(). Remove kmem_cache_alloc_trace(), which is now unused.

This patch makes the slab allocator use kmalloc_node tracepoints in kmalloc().

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 include/linux/slab.h | 82 +++++++++++++++++---------------------------
 mm/slab.c            | 14 --------
 mm/slub.c            |  9 -----
 3 files changed, 31 insertions(+), 74 deletions(-)
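
As a side note for reviewers, below is a minimal userspace sketch of the
control flow this patch consolidates: kmalloc() becomes a thin wrapper that
forwards to kmalloc_node() with NUMA_NO_NODE, so a single body handles both
the large-allocation fallback and the normal cache path for both entry
points. This is not kernel code: gfp flags, the per-size cache lookup, and
the tracepoints are stubbed out with printf()/malloc(), and the
KMALLOC_MAX_CACHE_SIZE value is an illustrative placeholder (it is
arch-dependent in the kernel). Only the dispatch structure mirrors the patch.

	#include <stdio.h>
	#include <stdlib.h>

	#define NUMA_NO_NODE		(-1)		/* "no node preference", as in the kernel */
	#define KMALLOC_MAX_CACHE_SIZE	8192		/* placeholder; arch-dependent in the kernel */
	#define ZERO_SIZE_PTR		((void *)16)	/* kernel's non-NULL token for size-0 requests */

	/* Stand-in for kmalloc_large_node(): requests too big for the slab caches. */
	static void *kmalloc_large_node(size_t size, int node)
	{
		printf("large path: %zu bytes, node %d\n", size, node);
		return malloc(size);
	}

	/* All sizing decisions live in one place, as they do after the patch. */
	static void *kmalloc_node(size_t size, int node)
	{
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large_node(size, node);
		if (size == 0)
			return ZERO_SIZE_PTR;
		printf("cache path: %zu bytes, node %d\n", size, node);
		return malloc(size);	/* stands in for the per-size cache lookup */
	}

	/* kmalloc() is now just a wrapper, mirroring the patch. */
	static void *kmalloc(size_t size)
	{
		return kmalloc_node(size, NUMA_NO_NODE);
	}

	int main(void)
	{
		void *p = kmalloc(64);		/* takes the cache path */
		void *q = kmalloc(16384);	/* takes the large path */

		free(p);
		free(q);
		return 0;
	}

With this shape, constant-size kmalloc() callers run through exactly the
same branches as kmalloc_node() callers, which is why the kernel side can
drop kmem_cache_alloc_trace() entirely and emit kmalloc_node tracepoints
from kmalloc().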