[v3,09/15] mm/slab_common: cleanup kmalloc_large()

Message ID 20220712133946.307181-10-42.hyeyoo@gmail.com (mailing list archive)
State New
Series common kmalloc v3

Commit Message

Hyeonggon Yoo July 12, 2022, 1:39 p.m. UTC
Now that kmalloc_large() and kmalloc_large_node() do mostly the same job,
make kmalloc_large() a wrapper of __kmalloc_large_node_notrace().

While at it, add the missing GFP_SLAB_BUG_MASK flag fix-up to
__kmalloc_large_node_notrace().

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/slab_common.c | 36 +++++++++++++-----------------------
 1 file changed, 13 insertions(+), 23 deletions(-)
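
The "missing flag fix-up" above refers to kmalloc_fix_flags(), which is
defined elsewhere in mm/slab_common.c and does not appear in this diff. A
rough sketch of the pattern it implements, hedged since the exact warning
text in the tree may differ: strip the GFP bits that are invalid for
kmalloc() and warn the caller, instead of forwarding them to the page
allocator.

	/*
	 * Sketch of the fix-up invoked when flags & GFP_SLAB_BUG_MASK is
	 * set: mask out the offending bits, complain loudly, and continue
	 * with a sane gfp mask. Message wording is approximate.
	 */
	gfp_t kmalloc_fix_flags(gfp_t flags)
	{
		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

		flags &= ~GFP_SLAB_BUG_MASK;
		pr_warn("Unexpected gfp: %#x. Fixing up to gfp: %#x. Fix your code!\n",
			invalid_mask, flags);
		dump_stack();

		return flags;
	}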

Comments

Vlastimil Babka July 28, 2022, 4:13 p.m. UTC | #1
On 7/12/22 15:39, Hyeonggon Yoo wrote:
> Now that kmalloc_large() and kmalloc_large_node() do mostly the same job,
> make kmalloc_large() a wrapper of __kmalloc_large_node_notrace().
> 
> While at it, add the missing GFP_SLAB_BUG_MASK flag fix-up to
> __kmalloc_large_node_notrace().
> 
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Reviewed-by: Vlastimil Babka <vbabka@suse.cz>

Patch

diff --git a/mm/slab_common.c b/mm/slab_common.c
index dc872e0ef0fc..9c46e2f9589f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -932,29 +932,6 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
  * directly to the page allocator. We use __GFP_COMP, because we will need to
  * know the allocation order to free the pages properly in kfree.
  */
-void *kmalloc_large(size_t size, gfp_t flags)
-{
-	void *ret = NULL;
-	struct page *page;
-	unsigned int order = get_order(size);
-
-	if (unlikely(flags & GFP_SLAB_BUG_MASK))
-		flags = kmalloc_fix_flags(flags);
-
-	flags |= __GFP_COMP;
-	page = alloc_pages(flags, order);
-	if (likely(page)) {
-		ret = page_address(page);
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
-				      PAGE_SIZE << order);
-	}
-	ret = kasan_kmalloc_large(ret, size, flags);
-	/* As ret might get tagged, call kmemleak hook after KASAN. */
-	kmemleak_alloc(ret, size, 1, flags);
-	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
-	return ret;
-}
-EXPORT_SYMBOL(kmalloc_large);
 
 static __always_inline
 void *__kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
@@ -963,6 +940,9 @@ void *__kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
 	void *ptr = NULL;
 	unsigned int order = get_order(size);
 
+	if (unlikely(flags & GFP_SLAB_BUG_MASK))
+		flags = kmalloc_fix_flags(flags);
+
 	flags |= __GFP_COMP;
 	page = alloc_pages_node(node, flags, order);
 	if (page) {
@@ -978,6 +958,16 @@ void *__kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
 	return ptr;
 }
 
+void *kmalloc_large(size_t size, gfp_t flags)
+{
+	void *ret = __kmalloc_large_node_notrace(size, flags, NUMA_NO_NODE);
+
+	trace_kmalloc(_RET_IP_, ret, NULL, size,
+		      PAGE_SIZE << get_order(size), flags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_large);
+
 void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
 {
 	return __kmalloc_large_node_notrace(size, flags, node);
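
After this patch both entry points funnel into the same backend and differ
only in tracing. A caller-side sketch of the resulting behavior (hypothetical
usage, not from the patch; assumes a sleepable GFP_KERNEL context):

	/* Traced entry point: backend call with NUMA_NO_NODE, plus a
	 * trace_kmalloc() event. */
	void *a = kmalloc_large(8 * PAGE_SIZE, GFP_KERNEL);

	/* Untraced entry point: same backend, caller-chosen node, no event. */
	void *b = kmalloc_large_node_notrace(8 * PAGE_SIZE, GFP_KERNEL, 0);

	/*
	 * Because the backend sets __GFP_COMP, kfree() can recover the
	 * allocation order from the compound page when freeing.
	 */
	kfree(a);
	kfree(b);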