
[v4,02/32] mm/slab: Dissolve slab_map_pages() in its caller

Message ID: 20220104001046.12263-3-vbabka@suse.cz
State: New
Series: Separate struct slab from struct page

Commit Message

Vlastimil Babka Jan. 4, 2022, 12:10 a.m. UTC
The function no longer does what its name and comment suggest; it just
sets two struct page fields, which can be done directly in its sole
caller.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
---
 mm/slab.c | 15 ++-------------
 1 file changed, 2 insertions(+), 13 deletions(-)
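
[Editor's note: the change is purely mechanical. The stand-alone sketch below is illustrative only, not kernel source; struct kmem_cache, struct page and the function names are simplified stand-ins mirroring the patch. It shows the before/after shape of dissolving a trivial two-assignment helper into its only caller.]

	#include <stdio.h>

	struct kmem_cache { const char *name; };	/* simplified stand-in */

	struct page {					/* simplified stand-in */
		struct kmem_cache *slab_cache;
		void *freelist;
	};

	/* Before: the helper only forwards two field assignments. */
	static void slab_map_pages(struct kmem_cache *cache, struct page *page,
				   void *freelist)
	{
		page->slab_cache = cache;
		page->freelist = freelist;
	}

	static void cache_grow_begin_before(struct kmem_cache *cachep,
					    struct page *page, void *freelist)
	{
		slab_map_pages(cachep, page, freelist);
	}

	/* After: the assignments live directly in the sole caller. */
	static void cache_grow_begin_after(struct kmem_cache *cachep,
					   struct page *page, void *freelist)
	{
		page->slab_cache = cachep;
		page->freelist = freelist;
	}

	int main(void)
	{
		struct kmem_cache cache = { .name = "demo" };
		struct page pg = { 0 };
		int freelist_storage = 0;

		cache_grow_begin_before(&cache, &pg, &freelist_storage);
		printf("before-style: cache=%s freelist=%p\n",
		       pg.slab_cache->name, pg.freelist);

		cache_grow_begin_after(&cache, &pg, &freelist_storage);
		printf("after-style:  cache=%s freelist=%p\n",
		       pg.slab_cache->name, pg.freelist);
		return 0;
	}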

Comments

Harry (Hyeonggon) Yoo Jan. 6, 2022, 6:40 a.m. UTC | #1
On Tue, Jan 04, 2022 at 01:10:16AM +0100, Vlastimil Babka wrote:
> The function no longer does what its name and comment suggests, and just
> sets two struct page fields, which can be done directly in its sole
> caller.
> 
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> Reviewed-by: Roman Gushchin <guro@fb.com>
> ---
>  mm/slab.c | 15 ++-------------
>  1 file changed, 2 insertions(+), 13 deletions(-)
> 
> diff --git a/mm/slab.c b/mm/slab.c
> index ca4822f6b2b6..381875e23277 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -2546,18 +2546,6 @@ static void slab_put_obj(struct kmem_cache *cachep,
>  	set_free_obj(page, page->active, objnr);
>  }
>  
> -/*
> - * Map pages beginning at addr to the given cache and slab. This is required
> - * for the slab allocator to be able to lookup the cache and slab of a
> - * virtual address for kfree, ksize, and slab debugging.
> - */
> -static void slab_map_pages(struct kmem_cache *cache, struct page *page,
> -			   void *freelist)
> -{
> -	page->slab_cache = cache;
> -	page->freelist = freelist;
> -}
> -
>  /*
>   * Grow (by 1) the number of slabs within a cache.  This is called by
>   * kmem_cache_alloc() when there are no active objs left in a cache.
> @@ -2621,7 +2609,8 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
>  	if (OFF_SLAB(cachep) && !freelist)
>  		goto opps1;
>  
> -	slab_map_pages(cachep, page, freelist);
> +	page->slab_cache = cachep;
> +	page->freelist = freelist;
>  
>  	cache_init_objs(cachep, page);
>  

Looks good.
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

> -- 
> 2.34.1
>

Patch

diff --git a/mm/slab.c b/mm/slab.c
index ca4822f6b2b6..381875e23277 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2546,18 +2546,6 @@  static void slab_put_obj(struct kmem_cache *cachep,
 	set_free_obj(page, page->active, objnr);
 }
 
-/*
- * Map pages beginning at addr to the given cache and slab. This is required
- * for the slab allocator to be able to lookup the cache and slab of a
- * virtual address for kfree, ksize, and slab debugging.
- */
-static void slab_map_pages(struct kmem_cache *cache, struct page *page,
-			   void *freelist)
-{
-	page->slab_cache = cache;
-	page->freelist = freelist;
-}
-
 /*
  * Grow (by 1) the number of slabs within a cache.  This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
@@ -2621,7 +2609,8 @@  static struct page *cache_grow_begin(struct kmem_cache *cachep,
 	if (OFF_SLAB(cachep) && !freelist)
 		goto opps1;
 
-	slab_map_pages(cachep, page, freelist);
+	page->slab_cache = cachep;
+	page->freelist = freelist;
 
 	cache_init_objs(cachep, page);