[v3,1/15] mm/slab: move NUMA-related code to __do_cache_alloc()

Message ID 20220712133946.307181-2-42.hyeyoo@gmail.com
State New
Series common kmalloc v3

Commit Message

Hyeonggon Yoo July 12, 2022, 1:39 p.m. UTC
To implement slab_alloc_node() independent of NUMA configuration,
move NUMA fallback/alternate allocation code into __do_cache_alloc().

One functional change here is that the availability of the node is no
longer checked when allocating from the local node.
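
For quick reference, the resulting flow under CONFIG_NUMA looks roughly
like this (a condensed sketch of the hunk below, with the goto-based
exits flattened into returns):

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	void *objp = NULL;
	int slab_node = numa_mem_id();

	if (nodeid == NUMA_NO_NODE) {
		/* Honor mempolicy/cpuset spreading first. */
		if (current->mempolicy || cpuset_do_slab_mem_spread()) {
			objp = alternate_node_alloc(cachep, flags);
			if (objp)
				return objp;
		}
		objp = ____cache_alloc(cachep, flags);
		nodeid = slab_node;
	} else if (nodeid == slab_node) {
		/* Local node: the get_node() availability check is gone. */
		objp = ____cache_alloc(cachep, flags);
	} else if (!get_node(cachep, nodeid)) {
		/* Node not bootstrapped yet. */
		return fallback_alloc(cachep, flags);
	}

	/* Local caches were empty, or a remote node was requested. */
	if (!objp)
		objp = ____cache_alloc_node(cachep, flags, nodeid);

	return objp;
}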

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
---

v3:
	Fixed an uninitialized-variable bug caused by the missing
	NULL initialization of objp.

 mm/slab.c | 68 +++++++++++++++++++++++++------------------------------
 1 file changed, 31 insertions(+), 37 deletions(-)

Comments

Christoph Lameter July 12, 2022, 2:29 p.m. UTC | #1
On Tue, 12 Jul 2022, Hyeonggon Yoo wrote:

> @@ -3241,31 +3219,46 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
>  }
>
>  static __always_inline void *
> -__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
> +__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
>  {
> -	void *objp;
> +	void *objp = NULL;
> +	int slab_node = numa_mem_id();
>
> -	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
> -		objp = alternate_node_alloc(cache, flags);
> -		if (objp)
> -			goto out;
> +	if (nodeid == NUMA_NO_NODE) {
> +		if (current->mempolicy || cpuset_do_slab_mem_spread()) {
> +			objp = alternate_node_alloc(cachep, flags);
> +			if (objp)
> +				goto out;
> +		}
> +		/*
> +		 * Use the locally cached objects if possible.
> +		 * However ____cache_alloc does not allow fallback
> +		 * to other nodes. It may fail while we still have
> +		 * objects on other nodes available.
> +		 */
> +		objp = ____cache_alloc(cachep, flags);
> +		nodeid = slab_node;
> +	} else if (nodeid == slab_node) {
> +		objp = ____cache_alloc(cachep, flags);
> +	} else if (!get_node(cachep, nodeid)) {
> +		/* Node not bootstrapped yet */
> +		objp = fallback_alloc(cachep, flags);
> +		goto out;
>  	}
> -	objp = ____cache_alloc(cache, flags);
>
>  	/*
>  	 * We may just have run out of memory on the local node.
>  	 * ____cache_alloc_node() knows how to locate memory on other nodes
>  	 */
>  	if (!objp)
> -		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
> -
> +		objp = ____cache_alloc_node(cachep, flags, nodeid);


Does this preserve the original behavior? nodeid is the parameter passed
to __do_cache_alloc(). numa_mem_id() is the nearest memory node.
Hyeonggon Yoo July 13, 2022, 9:39 a.m. UTC | #2
On Tue, Jul 12, 2022 at 04:29:10PM +0200, Christoph Lameter wrote:
> On Tue, 12 Jul 2022, Hyeonggon Yoo wrote:
> 
> > [...]
> 
> Does this preserve the original behavior? nodeid is the parameter passed
> to __do_cache_alloc(). numa_mem_id() is the nearest memory node.

Yes, it does preserve the original behavior.

nodeid equals the value of numa_mem_id() when nodeid was NUMA_NO_NODE and
____cache_alloc() failed to allocate.
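
That is (an illustrative side-by-side, not code from the patch):

/* Before: __do_cache_alloc() always fell back to the local node */
objp = ____cache_alloc(cache, flags);
if (!objp)
	objp = ____cache_alloc_node(cache, flags, numa_mem_id());

/* After: for nodeid == NUMA_NO_NODE, nodeid is rewritten first */
objp = ____cache_alloc(cachep, flags);
nodeid = slab_node;		/* slab_node = numa_mem_id() */
if (!objp)
	objp = ____cache_alloc_node(cachep, flags, nodeid);
				/* == numa_mem_id(), as before */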

Patch

diff --git a/mm/slab.c b/mm/slab.c
index 764cbadba69c..3d83d17ff3b3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3186,13 +3186,14 @@  static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 	return obj ? obj : fallback_alloc(cachep, flags);
 }
 
+static void *__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid);
+
 static __always_inline void *
 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
 		   unsigned long caller)
 {
 	unsigned long save_flags;
 	void *ptr;
-	int slab_node = numa_mem_id();
 	struct obj_cgroup *objcg = NULL;
 	bool init = false;
 
@@ -3207,30 +3208,7 @@  slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
-
-	if (nodeid == NUMA_NO_NODE)
-		nodeid = slab_node;
-
-	if (unlikely(!get_node(cachep, nodeid))) {
-		/* Node not bootstrapped yet */
-		ptr = fallback_alloc(cachep, flags);
-		goto out;
-	}
-
-	if (nodeid == slab_node) {
-		/*
-		 * Use the locally cached objects if possible.
-		 * However ____cache_alloc does not allow fallback
-		 * to other nodes. It may fail while we still have
-		 * objects on other nodes available.
-		 */
-		ptr = ____cache_alloc(cachep, flags);
-		if (ptr)
-			goto out;
-	}
-	/* ___cache_alloc_node can fall back to other nodes */
-	ptr = ____cache_alloc_node(cachep, flags, nodeid);
-out:
+	ptr = __do_cache_alloc(cachep, flags, nodeid);
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 	init = slab_want_init_on_alloc(flags, cachep);
@@ -3241,31 +3219,46 @@  slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 }
 
 static __always_inline void *
-__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	void *objp;
+	void *objp = NULL;
+	int slab_node = numa_mem_id();
 
-	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
-		objp = alternate_node_alloc(cache, flags);
-		if (objp)
-			goto out;
+	if (nodeid == NUMA_NO_NODE) {
+		if (current->mempolicy || cpuset_do_slab_mem_spread()) {
+			objp = alternate_node_alloc(cachep, flags);
+			if (objp)
+				goto out;
+		}
+		/*
+		 * Use the locally cached objects if possible.
+		 * However ____cache_alloc does not allow fallback
+		 * to other nodes. It may fail while we still have
+		 * objects on other nodes available.
+		 */
+		objp = ____cache_alloc(cachep, flags);
+		nodeid = slab_node;
+	} else if (nodeid == slab_node) {
+		objp = ____cache_alloc(cachep, flags);
+	} else if (!get_node(cachep, nodeid)) {
+		/* Node not bootstrapped yet */
+		objp = fallback_alloc(cachep, flags);
+		goto out;
 	}
-	objp = ____cache_alloc(cache, flags);
 
 	/*
 	 * We may just have run out of memory on the local node.
 	 * ____cache_alloc_node() knows how to locate memory on other nodes
 	 */
 	if (!objp)
-		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
-
+		objp = ____cache_alloc_node(cachep, flags, nodeid);
 out:
 	return objp;
 }
 #else
 
 static __always_inline void *
-__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unused)
 {
 	return ____cache_alloc(cachep, flags);
 }
@@ -3292,7 +3285,7 @@  slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
 
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
-	objp = __do_cache_alloc(cachep, flags);
+	objp = __do_cache_alloc(cachep, flags, NUMA_NO_NODE);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
 	prefetchw(objp);
@@ -3531,7 +3524,8 @@  int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 
 	local_irq_disable();
 	for (i = 0; i < size; i++) {
-		void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
+		void *objp = kfence_alloc(s, s->object_size, flags) ?:
+			     __do_cache_alloc(s, flags, NUMA_NO_NODE);
 
 		if (unlikely(!objp))
 			goto error;
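
For context, both public entry points now reach __do_cache_alloc() with an
explicit nodeid; an illustrative caller's view (not part of this patch):

	struct kmem_cache *c = kmem_cache_create("example", 64, 0, 0, NULL);

	/* slab_alloc() -> __do_cache_alloc(c, flags, NUMA_NO_NODE) */
	void *a = kmem_cache_alloc(c, GFP_KERNEL);

	/* slab_alloc_node() -> __do_cache_alloc(c, flags, 0) */
	void *b = kmem_cache_alloc_node(c, GFP_KERNEL, 0);

	kmem_cache_free(c, a);
	kmem_cache_free(c, b);
	kmem_cache_destroy(c);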