
[v2,09/23] mm/slab_common: cleanup kmalloc_large()

Message ID 20220414085727.643099-10-42.hyeyoo@gmail.com
State New
Series common kmalloc for SLUB and SLAB v2

Commit Message

Hyeonggon Yoo April 14, 2022, 8:57 a.m. UTC
Now that kmalloc_large() and kmalloc_large_node() do the same job,
make kmalloc_large() a wrapper of kmalloc_large_node().

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 include/linux/slab.h |  9 ++++++---
 mm/slab_common.c     | 24 ------------------------
 2 files changed, 6 insertions(+), 27 deletions(-)
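
For context, kmalloc_large() is the branch kmalloc() takes when a compile-time-constant size is too big for the kmalloc caches, so its callers are unaffected by this refactoring. An abridged sketch of that dispatch in include/linux/slab.h (simplified here, not the exact tree contents):

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		/* Beyond the largest kmalloc cache: use the page allocator. */
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
		/* ... otherwise index into the fixed kmalloc caches ... */
	}
	return __kmalloc(size, flags);
}

After this patch, that large-size branch resolves to kmalloc_large_node(size, flags, NUMA_NO_NODE), the same code the node-aware callers use.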

Comments

Vlastimil Babka April 26, 2022, 5:18 p.m. UTC | #1
On 4/14/22 10:57, Hyeonggon Yoo wrote:
> Now that kmalloc_large() and kmalloc_large_node() do the same job,
> make kmalloc_large() a wrapper of kmalloc_large_node().
> 
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

This would be fine, but depends on how the previous patch can be fixed up...

Patch

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 97336acbebbf..143830f57a7f 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -484,11 +484,14 @@  static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
-						     __alloc_size(1);
-
 extern void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 				__assume_page_alignment __alloc_size(1);
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	return kmalloc_large_node(size, flags, NUMA_NO_NODE);
+}
+
 /**
  * kmalloc - allocate memory
  * @size: how many bytes of memory are required.
diff --git a/mm/slab_common.c b/mm/slab_common.c
index cf17be8cd9ad..30684efc89d7 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -925,30 +925,6 @@  gfp_t kmalloc_fix_flags(gfp_t flags)
  * directly to the page allocator. We use __GFP_COMP, because we will need to
  * know the allocation order to free the pages properly in kfree.
  */
-void *kmalloc_large(size_t size, gfp_t flags)
-{
-	void *ret = NULL;
-	struct page *page;
-	unsigned int order = get_order(size);
-
-	if (unlikely(flags & GFP_SLAB_BUG_MASK))
-		flags = kmalloc_fix_flags(flags);
-
-	flags |= __GFP_COMP;
-	page = alloc_pages(flags, order);
-	if (likely(page)) {
-		ret = page_address(page);
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
-				      PAGE_SIZE << order);
-	}
-	ret = kasan_kmalloc_large(ret, size, flags);
-	/* As ret might get tagged, call kmemleak hook after KASAN. */
-	kmemleak_alloc(ret, size, 1, flags);
-	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
-	return ret;
-}
-EXPORT_SYMBOL(kmalloc_large);
-
 void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct page *page;
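
For reference, a sketch of the shared kmalloc_large_node() body left in place after the previous patch in the series, reconstructed from the removed kmalloc_large() code above; the node-aware alloc_pages_node() and trace_kmalloc_node() calls are an assumption based on the commit message, not the exact tree contents:

void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
	struct page *page;
	void *ret = NULL;
	unsigned int order = get_order(size);

	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	/* __GFP_COMP so kfree() can read the order back off the page. */
	flags |= __GFP_COMP;
	page = alloc_pages_node(node, flags, order);
	if (likely(page)) {
		ret = page_address(page);
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
				      PAGE_SIZE << order);
	}
	ret = kasan_kmalloc_large(ret, size, flags);
	/* As ret might get tagged, call the kmemleak hook after KASAN. */
	kmemleak_alloc(ret, size, 1, flags);
	trace_kmalloc_node(_RET_IP_, ret, size, PAGE_SIZE << order,
			   flags, node);
	return ret;
}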