[36/62] mm/slub: Convert full slab management to struct slab

Message ID 20211004134650.4031813-37-willy@infradead.org (mailing list archive)
State New
Series Separate struct slab from struct page

Commit Message

Matthew Wilcox (Oracle) Oct. 4, 2021, 1:46 p.m. UTC
Pass struct slab to add_full() and remove_full().  Improves type
safety.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
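
For context, a minimal self-contained sketch of the type-safety argument.
The structures and the simplified add_full() below are illustrative
stand-ins only, not the series' real struct slab / struct page or the
mm/slub.c helper (the kmem_cache and kmem_cache_node arguments and the
SLAB_STORE_USER check are dropped): once the helper takes struct slab *,
the compiler rejects a plain struct page *, which the old page-based
signature could not do.

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Stand-in for struct page: its list member is overloaded by many users. */
struct page {
	struct list_head lru;
	unsigned long flags;
};

/* Stand-in for struct slab: same layout, but only slab-specific fields. */
struct slab {
	struct list_head slab_list;
	unsigned long flags;
};

/* Simplified full-list helper: open-coded list_add(), slab-only argument. */
static void add_full(struct list_head *full_list, struct slab *slab)
{
	slab->slab_list.next = full_list->next;
	slab->slab_list.prev = full_list;
	full_list->next->prev = &slab->slab_list;
	full_list->next = &slab->slab_list;
}

int main(void)
{
	struct list_head full = { &full, &full };
	struct slab s = { .slab_list = { NULL, NULL } };

	add_full(&full, &s);
	/*
	 * add_full(&full, &some_page) no longer compiles: struct page *
	 * is not struct slab *, so only slabs can reach the full list.
	 */
	printf("slab is on the full list: %d\n", full.next == &s.slab_list);
	return 0;
}

Built with e.g. "gcc -Wall sketch.c", re-adding a struct page * caller
fails with an incompatible-pointer-type error, which is the point of
converting add_full()/remove_full() to take struct slab.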

Patch

diff --git a/mm/slub.c b/mm/slub.c
index 6d81e54e61df..32a1bd4c8a88 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1185,22 +1185,22 @@  static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Tracking of fully allocated slabs for debugging purposes.
  */
 static void add_full(struct kmem_cache *s,
-	struct kmem_cache_node *n, struct page *page)
+	struct kmem_cache_node *n, struct slab *slab)
 {
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
 	lockdep_assert_held(&n->list_lock);
-	list_add(&page->slab_list, &n->full);
+	list_add(&slab->slab_list, &n->full);
 }
 
-static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
 {
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
 	lockdep_assert_held(&n->list_lock);
-	list_del(&page->slab_list);
+	list_del(&slab->slab_list);
 }
 
 /* Tracking of the number of slabs for debugging purposes */
@@ -1616,9 +1616,9 @@  static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
-					struct page *page) {}
+					struct slab *slab) {}
 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
-					struct page *page) {}
+					struct slab *slab) {}
 slab_flags_t kmem_cache_flags(unsigned int object_size,
 	slab_flags_t flags, const char *name)
 {
@@ -2402,12 +2402,12 @@  static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 		if (l == M_PARTIAL)
 			remove_partial(n, slab);
 		else if (l == M_FULL)
-			remove_full(s, n, slab_page(slab));
+			remove_full(s, n, slab);
 
 		if (m == M_PARTIAL)
 			add_partial(n, slab, tail);
 		else if (m == M_FULL)
-			add_full(s, n, slab_page(slab));
+			add_full(s, n, slab);
 	}
 
 	l = m;
@@ -3361,7 +3361,7 @@  static void __slab_free(struct kmem_cache *s, struct slab *slab,
 	 * then add it.
 	 */
 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
-		remove_full(s, n, slab_page(slab));
+		remove_full(s, n, slab);
 		add_partial(n, slab, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -3377,7 +3377,7 @@  static void __slab_free(struct kmem_cache *s, struct slab *slab,
 		stat(s, FREE_REMOVE_PARTIAL);
 	} else {
 		/* Slab must be on the full list */
-		remove_full(s, n, slab_page(slab));
+		remove_full(s, n, slab);
 	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);