
[RFC,11/26] mm, slub: move disabling irqs closer to get_partial() in ___slab_alloc()

Message ID 20210524233946.20352-12-vbabka@suse.cz (mailing list archive)
State New, archived
Series SLUB: use local_lock for kmem_cache_cpu protection and reduce disabling irqs

Commit Message

Vlastimil Babka May 24, 2021, 11:39 p.m. UTC
Continue reducing the irq disabled scope. Check for per-cpu partial slabs
first with irqs enabled and then recheck with irqs disabled before grabbing
the slab page. Mostly preparatory for the following patches.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slub.c | 30 +++++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)
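
For context, the pattern the patch introduces is: look at the per-cpu partial
list optimistically with irqs still enabled, then disable irqs and recheck both
c->page and the partial list before actually grabbing anything. Below is a
minimal userspace sketch of that check/recheck pattern, not kernel code: the
pthread mutex stands in for local_irq_save()/restore(), and fake_cpu_slab /
grab_percpu_partial() are invented names, not anything from mm/slub.c.

/*
 * Userspace illustration only: the mutex models "irqs disabled",
 * the struct loosely models kmem_cache_cpu.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_cpu_slab {
	void *page;		/* currently active slab page, if any */
	void *partial;		/* head of the per-cpu partial list */
	pthread_mutex_t lock;	/* stand-in for disabling irqs */
};

static void *grab_percpu_partial(struct fake_cpu_slab *c)
{
	void *page = NULL;

	/* Optimistic check with no protection (irqs still enabled). */
	if (!c->partial)
		return NULL;

	/* Take the protection and recheck what may have changed. */
	pthread_mutex_lock(&c->lock);
	if (c->page) {
		/* A page was installed meanwhile; the caller retries. */
		pthread_mutex_unlock(&c->lock);
		return NULL;
	}
	if (c->partial) {		/* could have been stolen meanwhile */
		page = c->partial;
		c->partial = NULL;	/* simplified: take the whole list */
		c->page = page;
	}
	pthread_mutex_unlock(&c->lock);
	return page;
}

int main(void)
{
	int dummy_page;
	struct fake_cpu_slab c = {
		.partial = &dummy_page,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	printf("grabbed %p\n", grab_percpu_partial(&c));
	return 0;
}

The real code jumps back to reread_page or on to new_objects instead of
returning NULL, but the structure of the checks is the same as in the hunk
below.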

Comments

Jann Horn May 25, 2021, 4 p.m. UTC | #1
On Tue, May 25, 2021 at 1:40 AM Vlastimil Babka <vbabka@suse.cz> wrote:
> Continue reducing the irq disabled scope. Check for per-cpu partial slabs
> first with irqs enabled and then recheck with irqs disabled before grabbing
> the slab page. Mostly preparatory for the following patches.
[...]
> diff --git a/mm/slub.c b/mm/slub.c
[...]
>         if (slub_percpu_partial(c)) {
> +               local_irq_save(flags);
> +               if (unlikely(c->page)) {
> +                       local_irq_restore(flags);
> +                       goto reread_page;
> +               }
> +               if (unlikely(!slub_percpu_partial(c))) /* stolen by IRQ? */
> +                       goto new_objects;

nit: I think this comment is wrong by the end of the patch series,
since at that point, in RT configurations, it could also be stolen by
another task, if I understand correctly what migrate_disable() means?

Similarly the comment above ___slab_alloc() still talks about
disabling preemption for bulk allocation.
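
One way the comment could be generalized, purely as an illustration and not
necessarily what was merged later in the series:

	if (unlikely(!slub_percpu_partial(c))) {
		/*
		 * The partial list may have been emptied by an IRQ, or,
		 * on PREEMPT_RT where this path only runs under
		 * migrate_disable(), by another task that preempts us
		 * on this CPU.
		 */
		goto new_objects;
	}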

Patch

diff --git a/mm/slub.c b/mm/slub.c
index e243a991b15b..16f3cbb81627 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2653,11 +2653,6 @@  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		if (unlikely(node != NUMA_NO_NODE &&
 			     !node_isset(node, slab_nodes)))
 			node = NUMA_NO_NODE;
-		local_irq_save(flags);
-		if (unlikely(c->page)) {
-			local_irq_restore(flags);
-			goto reread_page;
-		}
 		goto new_slab;
 	}
 redo:
@@ -2698,6 +2693,7 @@  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 	if (!freelist) {
 		c->page = NULL;
+		local_irq_restore(flags);
 		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
 	}
@@ -2723,10 +2719,19 @@  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto reread_page;
 	}
 	deactivate_slab(s, page, c->freelist, c);
+	local_irq_restore(flags);
 
 new_slab:
 
 	if (slub_percpu_partial(c)) {
+		local_irq_save(flags);
+		if (unlikely(c->page)) {
+			local_irq_restore(flags);
+			goto reread_page;
+		}
+		if (unlikely(!slub_percpu_partial(c))) /* stolen by IRQ? */
+			goto new_objects;
+
 		page = c->page = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, page);
 		local_irq_restore(flags);
@@ -2734,6 +2739,14 @@  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto redo;
 	}
 
+	local_irq_save(flags);
+	if (unlikely(c->page)) {
+		local_irq_restore(flags);
+		goto reread_page;
+	}
+
+new_objects:
+
 	freelist = get_partial(s, gfpflags, node, &page);
 	if (freelist) {
 		c->page = page;
@@ -2767,15 +2780,18 @@  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 check_new_page:
 
 	if (kmem_cache_debug(s)) {
-		if (!alloc_debug_processing(s, page, freelist, addr))
+		if (!alloc_debug_processing(s, page, freelist, addr)) {
 			/* Slab failed checks. Next slab needed */
+			c->page = NULL;
+			local_irq_restore(flags);
 			goto new_slab;
-		else
+		} else {
 			/*
 			 * For debug case, we don't load freelist so that all
 			 * allocations go through alloc_debug_processing()
 			 */
 			goto return_single;
+		}
 	}
 
 	if (unlikely(!pfmemalloc_match(page, gfpflags)))