diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
@@ -11,62 +11,6 @@
DECLARE_EVENT_CLASS(kmem_alloc,
- TP_PROTO(unsigned long call_site,
- const void *ptr,
- struct kmem_cache *s,
- size_t bytes_req,
- size_t bytes_alloc,
- gfp_t gfp_flags),
-
- TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),
-
- TP_STRUCT__entry(
- __field( unsigned long, call_site )
- __field( const void *, ptr )
- __field( size_t, bytes_req )
- __field( size_t, bytes_alloc )
- __field( unsigned long, gfp_flags )
- __field( bool, accounted )
- ),
-
- TP_fast_assign(
- __entry->call_site = call_site;
- __entry->ptr = ptr;
- __entry->bytes_req = bytes_req;
- __entry->bytes_alloc = bytes_alloc;
- __entry->gfp_flags = (__force unsigned long)gfp_flags;
- __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
- ((gfp_flags & __GFP_ACCOUNT) ||
- (s && s->flags & SLAB_ACCOUNT)) : false;
- ),
-
- TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
- (void *)__entry->call_site,
- __entry->ptr,
- __entry->bytes_req,
- __entry->bytes_alloc,
- show_gfp_flags(__entry->gfp_flags),
- __entry->accounted ? "true" : "false")
-);
-
-DEFINE_EVENT(kmem_alloc, kmalloc,
-
- TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
- size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
-
- TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
-);
-
-DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
-
- TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
- size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
-
- TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
-);
-
-DECLARE_EVENT_CLASS(kmem_alloc_node,
-
TP_PROTO(unsigned long call_site,
const void *ptr,
struct kmem_cache *s,
@@ -109,7 +53,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__entry->accounted ? "true" : "false")
);
-DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
+DEFINE_EVENT(kmem_alloc, kmalloc,
TP_PROTO(unsigned long call_site, const void *ptr,
struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
@@ -118,7 +62,7 @@ DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);
-DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
+DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
TP_PROTO(unsigned long call_site, const void *ptr,
struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
diff --git a/mm/slab.c b/mm/slab.c
@@ -3440,8 +3440,8 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
{
void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_);
- trace_kmem_cache_alloc(_RET_IP_, ret, cachep,
- cachep->object_size, cachep->size, flags);
+ trace_kmem_cache_alloc(_RET_IP_, ret, cachep, cachep->object_size,
+ cachep->size, flags, NUMA_NO_NODE);
return ret;
}
@@ -3529,7 +3529,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(_RET_IP_, ret, cachep,
- size, cachep->size, flags);
+ size, cachep->size, flags, NUMA_NO_NODE);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -3552,9 +3552,9 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *ret = slab_alloc_node(cachep, NULL, flags, nodeid, cachep->object_size, _RET_IP_);
- trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep,
- cachep->object_size, cachep->size,
- flags, nodeid);
+ trace_kmem_cache_alloc(_RET_IP_, ret, cachep,
+ cachep->object_size, cachep->size,
+ flags, nodeid);
return ret;
}
@@ -3579,9 +3579,9 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
ret = slab_alloc_node(cachep, NULL, flags, nodeid, size, _RET_IP_);
ret = kasan_kmalloc(cachep, ret, size, flags);
- trace_kmalloc_node(_RET_IP_, ret, cachep,
- size, cachep->size,
- flags, nodeid);
+ trace_kmalloc(_RET_IP_, ret, cachep,
+ size, cachep->size,
+ flags, nodeid);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
diff --git a/mm/slab_common.c b/mm/slab_common.c
@@ -933,9 +933,9 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
ret = kmalloc_large_node_notrace(size, flags, node);
- trace_kmalloc_node(caller, ret, NULL,
- size, PAGE_SIZE << get_order(size),
- flags, node);
+ trace_kmalloc(caller, ret, NULL,
+ size, PAGE_SIZE << get_order(size),
+ flags, node);
return ret;
}
@@ -946,8 +946,8 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
ret = kasan_kmalloc(s, ret, size, flags);
- trace_kmalloc_node(caller, ret, s, size,
- s->size, flags, node);
+ trace_kmalloc(caller, ret, s, size,
+ s->size, flags, node);
return ret;
}
@@ -1076,7 +1076,7 @@ void *kmalloc_large(size_t size, gfp_t flags)
void *ret = __kmalloc_large_node_notrace(size, flags, NUMA_NO_NODE);
trace_kmalloc(_RET_IP_, ret, NULL, size,
- PAGE_SIZE << get_order(size), flags);
+ PAGE_SIZE << get_order(size), flags, NUMA_NO_NODE);
return ret;
}
EXPORT_SYMBOL(kmalloc_large);
@@ -1090,8 +1090,8 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
void *ret = __kmalloc_large_node_notrace(size, flags, node);
- trace_kmalloc_node(_RET_IP_, ret, NULL, size,
- PAGE_SIZE << get_order(size), flags, node);
+ trace_kmalloc(_RET_IP_, ret, NULL, size,
+ PAGE_SIZE << get_order(size), flags, node);
return ret;
}
EXPORT_SYMBOL(kmalloc_large_node);
@@ -1426,8 +1426,6 @@ EXPORT_SYMBOL(ksize);
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
-EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
-EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
diff --git a/mm/slob.c b/mm/slob.c
@@ -507,8 +507,8 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
*m = size;
ret = (void *)m + minalign;
- trace_kmalloc_node(caller, ret, NULL,
- size, size + minalign, gfp, node);
+ trace_kmalloc(caller, ret, NULL,
+ size, size + minalign, gfp, node);
} else {
unsigned int order = get_order(size);
@@ -516,8 +516,8 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
gfp |= __GFP_COMP;
ret = slob_new_pages(gfp, order, node);
- trace_kmalloc_node(caller, ret, NULL,
- size, PAGE_SIZE << order, gfp, node);
+ trace_kmalloc(caller, ret, NULL,
+ size, PAGE_SIZE << order, gfp, node);
}
kmemleak_alloc(ret, size, 1, gfp);
@@ -608,14 +608,14 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
if (c->size < PAGE_SIZE) {
b = slob_alloc(c->size, flags, c->align, node, 0);
- trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
- SLOB_UNITS(c->size) * SLOB_UNIT,
- flags, node);
+ trace_kmem_cache_alloc(_RET_IP_, b, NULL, c->object_size,
+ SLOB_UNITS(c->size) * SLOB_UNIT,
+ flags, node);
} else {
b = slob_new_pages(flags, get_order(c->size), node);
- trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
- PAGE_SIZE << get_order(c->size),
- flags, node);
+ trace_kmem_cache_alloc(_RET_IP_, b, NULL, c->object_size,
+ PAGE_SIZE << get_order(c->size),
+ flags, node);
}
if (b && c->ctor) {
diff --git a/mm/slub.c b/mm/slub.c
@@ -3244,7 +3244,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size,
- s->size, gfpflags);
+ s->size, gfpflags, NUMA_NO_NODE);
return ret;
}
@@ -3274,7 +3274,7 @@ void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
- trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags);
+ trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags, NUMA_NO_NODE);
ret = kasan_kmalloc(s, ret, size, gfpflags);
return ret;
}
@@ -3285,8 +3285,8 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
- trace_kmem_cache_alloc_node(_RET_IP_, ret, s,
- s->object_size, s->size, gfpflags, node);
+ trace_kmem_cache_alloc(_RET_IP_, ret, s,
+ s->object_size, s->size, gfpflags, node);
return ret;
}
@@ -3299,8 +3299,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
{
void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
- trace_kmalloc_node(_RET_IP_, ret, s,
- size, s->size, gfpflags, node);
+ trace_kmalloc(_RET_IP_, ret, s,
+ size, s->size, gfpflags, node);
ret = kasan_kmalloc(s, ret, size, gfpflags);
return ret;
Drop the kmem_alloc event class, rename kmem_alloc_node to kmem_alloc,
and remove the _node suffix from the NUMA version of the tracepoints.
This will break some tools that depend on {kmem_cache_alloc,kmalloc}_node,
but at this point maintaining both the kmem_alloc and kmem_alloc_node
event classes does not make sense at all.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 include/trace/events/kmem.h | 60 ++-----------------------------------
 mm/slab.c                   | 18 +++++------
 mm/slab_common.c            | 18 +++++------
 mm/slob.c                   | 20 ++++++-------
 mm/slub.c                   | 12 ++++----
 5 files changed, 35 insertions(+), 93 deletions(-)
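
Note for tracepoint consumers: after this change the surviving kmalloc and
kmem_cache_alloc events carry the node field from the old kmem_alloc_node
class, and the UMA call sites report NUMA_NO_NODE (-1) in it. A rough
sketch of how a tracefs-based workflow might adapt — the filter expression
is illustrative and relies on the node field of the merged event, not on
anything this patch adds to tracefs:

  # Before: UMA and NUMA allocations were separate events.
  echo 1 > /sys/kernel/tracing/events/kmem/kmalloc/enable
  echo 1 > /sys/kernel/tracing/events/kmem/kmalloc_node/enable

  # After: a single event; NUMA-bound allocations can be isolated by
  # filtering on the node field instead (NUMA_NO_NODE == -1).
  echo 1 > /sys/kernel/tracing/events/kmem/kmalloc/enable
  echo 'node >= 0' > /sys/kernel/tracing/events/kmem/kmalloc/filter

  # perf users drop the _node event names the same way:
  perf record -e kmem:kmalloc -e kmem:kmem_cache_alloc -a -- sleep 1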