
[RFC,04/32] mm: Add account_slab() and unaccount_slab()

Message ID: 20211116001628.24216-5-vbabka@suse.cz
State: New
Series: Separate struct slab from struct page

Commit Message

Vlastimil Babka Nov. 16, 2021, 12:16 a.m. UTC
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

These functions take struct slab instead of struct page and replace
(un)account_slab_page(). For now, their callers just convert the page to a slab with page_slab().

[ vbabka@suse.cz: replace existing functions instead of calling them ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slab.c |  4 ++--
 mm/slab.h | 17 ++++++++---------
 mm/slub.c |  4 ++--
 3 files changed, 12 insertions(+), 13 deletions(-)
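
The page-to-slab conversion at the call sites uses helpers introduced earlier in the series (page_slab(), slab_page(), slab_pgdat()). A simplified sketch of what this patch assumes, not the exact series definitions: at this stage struct slab still overlays struct page, so the conversions are essentially type changes, plus a pgdat lookup routed through the page.

static inline struct slab *page_slab(struct page *page)
{
	return (struct slab *)page;
}

static inline struct page *slab_page(struct slab *slab)
{
	return (struct page *)slab;
}

static inline pg_data_t *slab_pgdat(struct slab *slab)
{
	return page_pgdat(slab_page(slab));
}

With these in place, account_slab()/unaccount_slab() can take a struct slab while the memcg and vmstat interfaces, which still operate on struct page, are reached through slab_page() and slab_pgdat().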

Patch

diff --git a/mm/slab.c b/mm/slab.c
index 8e71968300c8..a8e35265c1c5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1380,7 +1380,7 @@  static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 		return NULL;
 	}
 
-	account_slab_page(page, cachep->gfporder, cachep, flags);
+	account_slab(page_slab(page), cachep->gfporder, cachep, flags);
 	__SetPageSlab(page);
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1405,7 +1405,7 @@  static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
-	unaccount_slab_page(page, order, cachep);
+	unaccount_slab(page_slab(page), order, cachep);
 	__free_pages(page, order);
 }
 
diff --git a/mm/slab.h b/mm/slab.h
index 9b2c7072a290..be42a467eeac 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -583,24 +583,23 @@  static inline struct kmem_cache *virt_to_cache(const void *obj)
 	return page->slab_cache;
 }
 
-static __always_inline void account_slab_page(struct page *page, int order,
-					      struct kmem_cache *s,
-					      gfp_t gfp)
+static __always_inline void account_slab(struct slab *slab, int order,
+					 struct kmem_cache *s, gfp_t gfp)
 {
 	if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
-		memcg_alloc_page_obj_cgroups(page, s, gfp, true);
+		memcg_alloc_page_obj_cgroups(slab_page(slab), s, gfp, true);
 
-	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
 			    PAGE_SIZE << order);
 }
 
-static __always_inline void unaccount_slab_page(struct page *page, int order,
-						struct kmem_cache *s)
+static __always_inline void unaccount_slab(struct slab *slab, int order,
+					   struct kmem_cache *s)
 {
 	if (memcg_kmem_enabled())
-		memcg_free_page_obj_cgroups(page);
+		memcg_free_page_obj_cgroups(slab_page(slab));
 
-	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
 			    -(PAGE_SIZE << order));
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index a0124ee8f865..725f1332f936 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1943,7 +1943,7 @@  static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	page->objects = oo_objects(oo);
 
-	account_slab_page(page, oo_order(oo), s, flags);
+	account_slab(page_slab(page), oo_order(oo), s, flags);
 
 	page->slab_cache = s;
 	__SetPageSlab(page);
@@ -2014,7 +2014,7 @@  static void __free_slab(struct kmem_cache *s, struct page *page)
 	page->slab_cache = NULL;
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	unaccount_slab_page(page, order, s);
+	unaccount_slab(page_slab(page), order, s);
 	__free_pages(page, order);
 }
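
The page_slab() calls at the call sites are transitional. Once later patches in the series convert the allocator internals to pass struct slab around directly, the conversions go away. A hedged sketch of how the SLUB caller might end up looking, assuming allocate_slab() is eventually converted to return a struct slab (the exact shape depends on the later patches, not on this one):

static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct slab *slab;

	/* ... page allocation and conversion to struct slab elided ... */

	slab->objects = oo_objects(oo);

	/* No page_slab() conversion needed once the caller holds a slab. */
	account_slab(slab, oo_order(oo), s, flags);

	slab->slab_cache = s;

	/* ... */
	return slab;
}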