[27/62] mm/slub: Convert __unfreeze_partials to take a struct slab

Message ID 20211004134650.4031813-28-willy@infradead.org
State New
Series Separate struct slab from struct page

Commit Message

Matthew Wilcox (Oracle) Oct. 4, 2021, 1:46 p.m. UTC
Improves type safety while removing a few calls to slab_page(): the
callers now pass struct slab directly, and the remaining conversions
sink down to the still page-based APIs (__cmpxchg_double_slab(),
add_partial() and discard_slab()).
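
For reference, the helpers this conversion leans on were introduced
earlier in the series. A minimal sketch of them, assuming struct slab
overlays struct page as the series arranges (the exact definitions in
mm/slab.h may differ):

	/* Sketch only: struct slab aliases struct page, so converting
	 * back for the remaining page-based APIs is just a cast. */
	static inline struct page *slab_page(struct slab *slab)
	{
		return (struct page *)slab;
	}

	/* Sketch only: NUMA node lookup via the underlying page. */
	static inline int slab_nid(struct slab *slab)
	{
		return page_to_nid(slab_page(slab));
	}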

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c | 54 +++++++++++++++++++++++++++---------------------------
 1 file changed, 27 insertions(+), 27 deletions(-)

Patch

diff --git a/mm/slub.c b/mm/slub.c
index f33a196fe64f..e6fd0619d1f2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2437,20 +2437,20 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 }
 
 #ifdef CONFIG_SLUB_CPU_PARTIAL
-static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
+static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
 {
 	struct kmem_cache_node *n = NULL, *n2 = NULL;
-	struct page *page, *discard_page = NULL;
+	struct slab *slab, *unusable = NULL;
 	unsigned long flags = 0;
 
-	while (partial_page) {
-		struct page new;
-		struct page old;
+	while (partial_slab) {
+		struct slab new;
+		struct slab old;
 
-		page = partial_page;
-		partial_page = page->next;
+		slab = partial_slab;
+		partial_slab = slab->next;
 
-		n2 = get_node(s, page_to_nid(page));
+		n2 = get_node(s, slab_nid(slab));
 		if (n != n2) {
 			if (n)
 				spin_unlock_irqrestore(&n->list_lock, flags);
@@ -2461,8 +2461,8 @@ static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
 
 		do {
 
-			old.freelist = page->freelist;
-			old.counters = page->counters;
+			old.freelist = slab->freelist;
+			old.counters = slab->counters;
 			VM_BUG_ON(!old.frozen);
 
 			new.counters = old.counters;
@@ -2470,16 +2470,16 @@ static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
 
 			new.frozen = 0;
 
-		} while (!__cmpxchg_double_slab(s, page,
+		} while (!__cmpxchg_double_slab(s, slab_page(slab),
 				old.freelist, old.counters,
 				new.freelist, new.counters,
 				"unfreezing slab"));
 
 		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
-			page->next = discard_page;
-			discard_page = page;
+			slab->next = unusable;
+			unusable = slab;
 		} else {
-			add_partial(n, page, DEACTIVATE_TO_TAIL);
+			add_partial(n, slab_page(slab), DEACTIVATE_TO_TAIL);
 			stat(s, FREE_ADD_PARTIAL);
 		}
 	}
@@ -2487,12 +2487,12 @@ static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
 	if (n)
 		spin_unlock_irqrestore(&n->list_lock, flags);
 
-	while (discard_page) {
-		page = discard_page;
-		discard_page = discard_page->next;
+	while (unusable) {
+		slab = unusable;
+		unusable = unusable->next;
 
 		stat(s, DEACTIVATE_EMPTY);
-		discard_slab(s, page);
+		discard_slab(s, slab_page(slab));
 		stat(s, FREE_SLAB);
 	}
 }
@@ -2502,28 +2502,28 @@ static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
  */
 static void unfreeze_partials(struct kmem_cache *s)
 {
-	struct page *partial_page;
+	struct slab *partial_slab;
 	unsigned long flags;
 
 	local_lock_irqsave(&s->cpu_slab->lock, flags);
-	partial_page = slab_page(this_cpu_read(s->cpu_slab->partial));
+	partial_slab = this_cpu_read(s->cpu_slab->partial);
 	this_cpu_write(s->cpu_slab->partial, NULL);
 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 
-	if (partial_page)
-		__unfreeze_partials(s, partial_page);
+	if (partial_slab)
+		__unfreeze_partials(s, partial_slab);
 }
 
 static void unfreeze_partials_cpu(struct kmem_cache *s,
 				  struct kmem_cache_cpu *c)
 {
-	struct page *partial_page;
+	struct slab *partial_slab;
 
-	partial_page = slab_page(slub_percpu_partial(c));
+	partial_slab = slub_percpu_partial(c);
 	c->partial = NULL;
 
-	if (partial_page)
-		__unfreeze_partials(s, partial_page);
+	if (partial_slab)
+		__unfreeze_partials(s, partial_slab);
 }
 
 /*
@@ -2572,7 +2572,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 
 	if (slab_to_unfreeze) {
-		__unfreeze_partials(s, slab_page(slab_to_unfreeze));
+		__unfreeze_partials(s, slab_to_unfreeze);
 		stat(s, CPU_PARTIAL_DRAIN);
 	}
 }