--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -571,23 +571,35 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 	return __kmalloc(size, flags);
 }
 
+#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-#ifndef CONFIG_SLOB
-	if (__builtin_constant_p(size) &&
-		size <= KMALLOC_MAX_CACHE_SIZE) {
-		unsigned int i = kmalloc_index(size);
+	if (__builtin_constant_p(size)) {
+		unsigned int index;
 
-		if (!i)
+		if (size > KMALLOC_MAX_CACHE_SIZE)
+			return kmalloc_large_node(size, flags, node);
+
+		index = kmalloc_index(size);
+
+		if (!index)
 			return ZERO_SIZE_PTR;
 
 		return kmem_cache_alloc_node_trace(
-				kmalloc_caches[kmalloc_type(flags)][i],
+				kmalloc_caches[kmalloc_type(flags)][index],
 						flags, node, size);
 	}
-#endif
 	return __kmalloc_node(size, flags, node);
 }
+#else
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
+		return kmalloc_large_node(size, flags, node);
+
+	return __kmalloc_node(size, flags, node);
+}
+#endif
 
 /**
  * kmalloc_array - allocate memory for an array.
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -273,6 +273,8 @@ void create_kmalloc_caches(slab_flags_t);
 
 /* Find the kmalloc slab corresponding for a certain size */
 struct kmem_cache *kmalloc_slab(size_t, gfp_t);
+
+void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node);
 #endif
 
 gfp_t kmalloc_fix_flags(gfp_t flags);
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -928,7 +928,7 @@ void *kmalloc_large(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(kmalloc_large);
 
-void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
 {
 	struct page *page;
 	void *ptr = NULL;
@@ -948,6 +948,15 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 
 	return ptr;
 }
+
+void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+{
+	void *ret = kmalloc_large_node_notrace(size, flags, node);
+
+	trace_kmalloc_node(_RET_IP_, ret, NULL, size,
+			   PAGE_SIZE << get_order(size), flags, node);
+	return ret;
+}
 EXPORT_SYMBOL(kmalloc_large_node);
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4401,7 +4401,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	void *ret;
 
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
-		ret = kmalloc_large_node(size, flags, node);
+		ret = kmalloc_large_node_notrace(size, flags, node);
 
 		trace_kmalloc_node(caller, ret, NULL,
 				   size, PAGE_SIZE << get_order(size),