[13/62] mm/slub: Convert new_slab() to return a struct slab

Message ID: 20211004134650.4031813-14-willy@infradead.org
State: New
Series: Separate struct slab from struct page

Commit Message

Matthew Wilcox (Oracle) Oct. 4, 2021, 1:46 p.m. UTC
We can cast directly from struct page to struct slab in alloc_slab_page(),
which this patch renames to alloc_slab(), because the page pointer returned
by the page allocator is guaranteed to be a head page. allocate_slab() and
new_slab() then return the struct slab; the two callers of new_slab() that
still operate on struct page convert the result back with slab_page().
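
As an illustration of why the cast is sound, here is a minimal,
self-contained sketch; the struct definitions and the helper name
page_slab_cast() are simplified stand-ins invented for this example, not
the kernel's real ones. The idea is that struct slab is laid out to
overlay the head struct page, with compile-time assertions keeping the
shared fields at matching offsets, so the conversion is a pure type
reinterpretation:

    #include <assert.h>
    #include <stddef.h>

    /* Simplified stand-ins for the real types (illustration only). */
    struct page {
            unsigned long flags;
            void *freelist;         /* first free object */
            unsigned int inuse;     /* allocated objects */
    };

    struct slab {
            unsigned long flags;
            void *freelist;
            unsigned int inuse;
    };

    /*
     * The cast is only sound because every shared field sits at the
     * same offset in both structures; assertions of roughly this shape
     * enforce that at build time.
     */
    static_assert(offsetof(struct page, freelist) ==
                  offsetof(struct slab, freelist),
                  "struct slab must overlay struct page");

    /* Hypothetical helper: valid for head pages only. */
    static inline struct slab *page_slab_cast(struct page *head)
    {
            return (struct slab *)head;
    }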

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c | 62 +++++++++++++++++++++++++++----------------------------
 1 file changed, 31 insertions(+), 31 deletions(-)

Patch

diff --git a/mm/slub.c b/mm/slub.c
index 0a566a03d424..555c46cbae1f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1753,8 +1753,8 @@ static void *setup_object(struct kmem_cache *s, struct page *page,
 /*
  * Slab allocation and freeing
  */
-static inline struct page *alloc_slab_page(struct kmem_cache *s,
-		gfp_t flags, int node, struct kmem_cache_order_objects oo)
+static inline struct slab *alloc_slab(struct kmem_cache *s, gfp_t flags,
+				int node, struct kmem_cache_order_objects oo)
 {
 	struct page *page;
 	unsigned int order = oo_order(oo);
@@ -1764,7 +1764,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 	else
 		page = __alloc_pages_node(node, flags, order);
 
-	return page;
+	return (struct slab *)page;
 }
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
@@ -1876,9 +1876,9 @@ static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
 }
 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
 
-static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
-	struct page *page;
+	struct slab *slab;
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 	void *start, *p, *next;
@@ -1897,63 +1897,63 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
 
-	page = alloc_slab_page(s, alloc_gfp, node, oo);
-	if (unlikely(!page)) {
+	slab = alloc_slab(s, alloc_gfp, node, oo);
+	if (unlikely(!slab)) {
 		oo = s->min;
 		alloc_gfp = flags;
 		/*
 		 * Allocation may have failed due to fragmentation.
 		 * Try a lower order alloc if possible
 		 */
-		page = alloc_slab_page(s, alloc_gfp, node, oo);
-		if (unlikely(!page))
+		slab = alloc_slab(s, alloc_gfp, node, oo);
+		if (unlikely(!slab))
 			goto out;
 		stat(s, ORDER_FALLBACK);
 	}
 
-	page->objects = oo_objects(oo);
+	slab->objects = oo_objects(oo);
 
-	account_slab_page(page, oo_order(oo), s, flags);
+	account_slab(slab, oo_order(oo), s, flags);
 
-	page->slab_cache = s;
-	__SetPageSlab(page);
-	if (page_is_pfmemalloc(page))
-		SetPageSlabPfmemalloc(page);
+	slab->slab_cache = s;
+	__SetPageSlab(slab_page(slab));
+	if (page_is_pfmemalloc(slab_page(slab)))
+		slab_set_pfmemalloc(slab);
 
-	kasan_poison_slab(page);
+	kasan_poison_slab(slab_page(slab));
 
-	start = page_address(page);
+	start = slab_address(slab);
 
-	setup_page_debug(s, page, start);
+	setup_page_debug(s, slab_page(slab), start);
 
-	shuffle = shuffle_freelist(s, page);
+	shuffle = shuffle_freelist(s, slab_page(slab));
 
 	if (!shuffle) {
 		start = fixup_red_left(s, start);
-		start = setup_object(s, page, start);
-		page->freelist = start;
-		for (idx = 0, p = start; idx < page->objects - 1; idx++) {
+		start = setup_object(s, slab_page(slab), start);
+		slab->freelist = start;
+		for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
 			next = p + s->size;
-			next = setup_object(s, page, next);
+			next = setup_object(s, slab_page(slab), next);
 			set_freepointer(s, p, next);
 			p = next;
 		}
 		set_freepointer(s, p, NULL);
 	}
 
-	page->inuse = page->objects;
-	page->frozen = 1;
+	slab->inuse = slab->objects;
+	slab->frozen = 1;
 
 out:
-	if (!page)
+	if (!slab)
 		return NULL;
 
-	inc_slabs_node(s, page_to_nid(page), page->objects);
+	inc_slabs_node(s, slab_nid(slab), slab->objects);
 
-	return page;
+	return slab;
 }
 
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
 		flags = kmalloc_fix_flags(flags);
@@ -2991,7 +2991,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto check_new_page;
 
 	slub_put_cpu_ptr(s->cpu_slab);
-	page = new_slab(s, gfpflags, node);
+	page = slab_page(new_slab(s, gfpflags, node));
 	c = slub_get_cpu_ptr(s->cpu_slab);
 
 	if (unlikely(!page)) {
@@ -3896,7 +3896,7 @@ static void early_kmem_cache_node_alloc(int node)
 
 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
 
-	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
+	page = slab_page(new_slab(kmem_cache_node, GFP_NOWAIT, node));
 
 	BUG_ON(!page);
 	if (page_to_nid(page) != node) {
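
At this stage of the series most callers still traffic in struct page,
so the two new_slab() call sites above immediately convert the returned
slab back with slab_page(). Conceptually that helper is the inverse
cast; a rough sketch under the same overlay assumption as above (the
name slab_page_cast() is invented here, and the series' real slab_page()
helper may be defined in terms of struct folio rather than a bare cast):

    /* Hypothetical inverse of the cast in alloc_slab(). */
    static inline struct page *slab_page_cast(struct slab *slab)
    {
            /*
             * A direct cast preserves NULL, so the callers'
             * !page checks above keep working after the round trip.
             */
            return (struct page *)slab;
    }

Later patches in the series convert these call sites to work on struct
slab directly, at which point the round trip disappears.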