@@ -1763,9 +1763,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	flags &= gfp_allowed_mask;
 
-	if (gfpflags_allow_blocking(flags))
-		local_irq_enable();
-
 	flags |= s->allocflags;
 
 	/*
@@ -1824,8 +1821,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	page->frozen = 1;
 
 out:
-	if (gfpflags_allow_blocking(flags))
-		local_irq_disable();
 	if (!page)
 		return NULL;
 
@@ -2753,17 +2748,18 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto check_new_page;
 	}
 
+	local_irq_restore(flags);
 	migrate_enable();
 	page = new_slab(s, gfpflags, node);
 	migrate_disable();
 	c = this_cpu_ptr(s->cpu_slab);
 
 	if (unlikely(!page)) {
-		local_irq_restore(flags);
 		slab_out_of_memory(s, gfpflags, node);
 		return NULL;
 	}
+	local_irq_save(flags);
 
 	if (c->page)
 		flush_slab(s, c);
 
allocate_slab() currently re-enables irqs before calling into the page
allocator. It depends on gfpflags_allow_blocking() to determine if it's
safe to do so. Now we can instead simply restore irqs before calling it
through new_slab(). The other caller, early_kmem_cache_node_alloc(), is
unaffected.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slub.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)
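
For review convenience, below is a condensed sketch of how the
___slab_alloc() slow path reads with this patch applied. It is
illustrative code assembled from the hunks above, not the verbatim
mm/slub.c function: the wrapper name slowpath_sketch() is invented for
this note, and the check_new_page handling is left out. The point it
tries to show is that restoring the saved irq state around new_slab()
subsumes the gfpflags_allow_blocking() toggling that allocate_slab()
used to do internally.

/*
 * Sketch only -- condensed from the hunks above, not verbatim kernel
 * code. Assumes slub.c context (new_slab(), flush_slab(),
 * slab_out_of_memory() are static there); slowpath_sketch() is a
 * made-up name for illustration.
 */
static void *slowpath_sketch(struct kmem_cache *s, gfp_t gfpflags, int node,
			     struct kmem_cache_cpu *c, unsigned long flags)
{
	struct page *page;

	/*
	 * "flags" was filled by local_irq_save() earlier in the slow
	 * path. Restoring it hands the page allocator the caller's
	 * original irq state, so allocate_slab() no longer needs to
	 * key irq toggling off gfpflags_allow_blocking().
	 */
	local_irq_restore(flags);
	migrate_enable();
	page = new_slab(s, gfpflags, node);
	migrate_disable();
	c = this_cpu_ptr(s->cpu_slab);	/* may have migrated; reload */

	if (unlikely(!page)) {
		/* irqs already in the caller's state; nothing to undo */
		slab_out_of_memory(s, gfpflags, node);
		return NULL;
	}

	local_irq_save(flags);		/* irqs off again for c->page work */
	if (c->page)
		flush_slab(s, c);

	return page;
}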