[v3,11/15] mm/sl[au]b: introduce common alloc/free functions without tracepoint

Message ID: 20220712133946.307181-12-42.hyeyoo@gmail.com
State: New
Series: common kmalloc v3

Commit Message

Hyeonggon Yoo July 12, 2022, 1:39 p.m. UTC
To unify the kmalloc functions in a later patch, introduce common alloc/free
functions that do not have tracepoints.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---

v3: 
   Tried to avoid affecting existing functions.

 mm/slab.c | 36 +++++++++++++++++++++++++++++-------
 mm/slab.h |  4 ++++
 mm/slub.c | 13 +++++++++++++
 3 files changed, 46 insertions(+), 7 deletions(-)
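
The point of the new entry points is that the common kmalloc code added in a later patch can allocate through the per-allocator fast path and emit its own tracepoint. As an illustration only, a unified helper in mm/slab_common.c could take a shape like the sketch below; the function name, the exact tracepoint arguments and the omission of KASAN/hardening hooks are assumptions for illustration, not part of this patch.

/*
 * Illustrative sketch only: one possible shape for a common kmalloc
 * slow path built on top of __kmem_cache_alloc_node().  The helper
 * name and the exact tracepoint arguments are assumptions; KASAN and
 * other hooks are omitted for brevity.
 */
static __always_inline
void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *s;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
		return kmalloc_large_node_notrace(size, flags, node);

	s = kmalloc_slab(size, flags);
	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;

	/* Allocate without the per-allocator tracepoint ... */
	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);

	/* ... and emit the kmalloc tracepoint from common code instead. */
	trace_kmalloc_node(caller, ret, size, s->size, flags, node);

	return ret;
}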

Comments

Vlastimil Babka July 29, 2022, 9:49 a.m. UTC | #1
On 7/12/22 15:39, Hyeonggon Yoo wrote:
> To unify the kmalloc functions in a later patch, introduce common alloc/free
> functions that do not have tracepoints.
> 
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Reviewed-by: Vlastimil Babka <vbabka@suse.cz>

Patch

diff --git a/mm/slab.c b/mm/slab.c
index a2f43425a0ae..375e35c14430 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3560,6 +3560,14 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+void *__kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+			     int nodeid, size_t orig_size,
+			     unsigned long caller)
+{
+	return slab_alloc_node(cachep, NULL, flags, nodeid,
+			       orig_size, caller);
+}
+
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 				  gfp_t flags,
@@ -3645,6 +3653,26 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
+static __always_inline
+void __do_kmem_cache_free(struct kmem_cache *cachep, void *objp,
+			  unsigned long caller)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	debug_check_no_locks_freed(objp, cachep->object_size);
+	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
+		debug_check_no_obj_freed(objp, cachep->object_size);
+	__cache_free(cachep, objp, caller);
+	local_irq_restore(flags);
+}
+
+void __kmem_cache_free(struct kmem_cache *cachep, void *objp,
+		       unsigned long caller)
+{
+	__do_kmem_cache_free(cachep, objp, caller);
+}
+
 /**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
@@ -3655,18 +3683,12 @@ EXPORT_SYMBOL(__kmalloc);
  */
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
-	unsigned long flags;
 	cachep = cache_from_obj(cachep, objp);
 	if (!cachep)
 		return;
 
 	trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
-	local_irq_save(flags);
-	debug_check_no_locks_freed(objp, cachep->object_size);
-	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(objp, cachep->object_size);
-	__cache_free(cachep, objp, _RET_IP_);
-	local_irq_restore(flags);
+	__do_kmem_cache_free(cachep, objp, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff --git a/mm/slab.h b/mm/slab.h
index c81c92d421f1..9193e9c1f040 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -275,6 +275,10 @@ void create_kmalloc_caches(slab_flags_t);
 struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 
 void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node);
+void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
+			      int node, size_t orig_size,
+			      unsigned long caller);
+void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
 #endif
 
 gfp_t kmalloc_fix_flags(gfp_t flags);
diff --git a/mm/slub.c b/mm/slub.c
index 6cb7ca27f3b7..74eb78678c98 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3262,6 +3262,14 @@ void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 }
 EXPORT_SYMBOL(kmem_cache_alloc_lru);
 
+void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
+			      int node, size_t orig_size,
+			      unsigned long caller)
+{
+	return slab_alloc_node(s, NULL, gfpflags, node,
+			       caller, orig_size);
+}
+
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
@@ -3526,6 +3534,11 @@ void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
 }
 #endif
 
+void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller)
+{
+	slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller);
+}
+
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
 	s = cache_from_obj(s, x);
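
The free side follows the same idea: __kmem_cache_free() frees an object on behalf of common code that has already resolved the cache and emitted its own tracepoint. For illustration only, a common kfree() built on top of it might look like the sketch below; free_large_kmalloc() and the folio handling are assumptions about later patches in the series, not something this patch adds.

/*
 * Illustrative sketch only: a possible common kfree() on top of
 * __kmem_cache_free().  free_large_kmalloc() is assumed to exist for
 * page-allocator-backed kmalloc objects.
 */
void kfree(const void *object)
{
	struct folio *folio;
	struct slab *slab;

	trace_kfree(_RET_IP_, object);

	if (unlikely(ZERO_OR_NULL_PTR(object)))
		return;

	folio = virt_to_folio(object);
	if (unlikely(!folio_test_slab(folio))) {
		free_large_kmalloc(folio, (void *)object);
		return;
	}

	slab = folio_slab(folio);
	__kmem_cache_free(slab->slab_cache, (void *)object, _RET_IP_);
}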