--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -484,11 +484,14 @@ static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g
}
#endif /* CONFIG_TRACING */

-extern void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
- __alloc_size(1);
-
extern void *kmalloc_large_node(size_t size, gfp_t flags, int node)
__assume_page_alignment __alloc_size(1);
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+ return kmalloc_large_node(size, flags, NUMA_NO_NODE);
+}
+
/**
* kmalloc - allocate memory
* @size: how many bytes of memory are required.
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -925,30 +925,6 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
* directly to the page allocator. We use __GFP_COMP, because we will need to
* know the allocation order to free the pages properly in kfree.
*/
-void *kmalloc_large(size_t size, gfp_t flags)
-{
- void *ret = NULL;
- struct page *page;
- unsigned int order = get_order(size);
-
- if (unlikely(flags & GFP_SLAB_BUG_MASK))
- flags = kmalloc_fix_flags(flags);
-
- flags |= __GFP_COMP;
- page = alloc_pages(flags, order);
- if (likely(page)) {
- ret = page_address(page);
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
- PAGE_SIZE << order);
- }
- ret = kasan_kmalloc_large(ret, size, flags);
- /* As ret might get tagged, call kmemleak hook after KASAN. */
- kmemleak_alloc(ret, size, 1, flags);
- trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
- return ret;
-}
-EXPORT_SYMBOL(kmalloc_large);
-
void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
struct page *page;

Now that kmalloc_large() and kmalloc_large_node() do the same job, make
kmalloc_large() a static inline wrapper of kmalloc_large_node().

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 include/linux/slab.h | 9 ++++++---
 mm/slab_common.c     | 24 ------------------------
 2 files changed, 6 insertions(+), 27 deletions(-)
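
To make the "do the same job" claim concrete: the new wrapper only has to
forward NUMA_NO_NODE, because that value means "no node preference" all the
way down. Assuming kmalloc_large_node() allocates through alloc_pages_node(),
the constant is resolved to the current node, which keeps the old behaviour of
the removed alloc_pages()-based body. A rough, paraphrased sketch of that
fallback (illustrative only; alloc_pages_node_sketch() is a made-up name, not
code from this patch):

#include <linux/gfp.h>
#include <linux/topology.h>

/*
 * Rough sketch of the NUMA_NO_NODE fallback done by alloc_pages_node()
 * (illustrative only, not part of this patch): a node id of NUMA_NO_NODE
 * is resolved to the current node, which is why the new kmalloc_large()
 * wrapper can simply pass NUMA_NO_NODE and keep its old "no node
 * preference" semantics.
 */
static inline struct page *alloc_pages_node_sketch(int nid, gfp_t gfp_mask,
						   unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node(nid, gfp_mask, order);
}

With the wrapper in place, kmalloc_large(size, flags) and
kmalloc_large_node(size, flags, NUMA_NO_NODE) are literally the same call.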
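
Regarding the __GFP_COMP comment kept above kmalloc_large_node(): allocating
the pages as a compound page lets the free side recover the allocation order
from the page itself instead of storing it separately. A minimal sketch of
that idea, assuming the usual compound-page helpers; free_large_kmalloc_sketch()
is a made-up name and this is not the kfree() path from this patch:

#include <linux/mm.h>
#include <linux/memcontrol.h>

/*
 * Illustrative sketch only (not part of this patch): __GFP_COMP turns the
 * large allocation into a compound page, so the order needed for freeing
 * can be read back with compound_order() at kfree time.
 */
static inline void free_large_kmalloc_sketch(struct page *page)
{
	unsigned int order = compound_order(page);

	/* undo the NR_SLAB_UNRECLAIMABLE_B accounting done at allocation */
	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
			      -(PAGE_SIZE << order));
	__free_pages(page, order);
}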