Message ID | 20230106174028.151384-3-sj@kernel.org
---|---
State | New
Series | add folio_headpage() macro
On Fri, Jan 06, 2023 at 05:40:27PM +0000, SeongJae Park wrote:
> diff --git a/mm/shmem.c b/mm/shmem.c
> index bc5c156ef470..8ae73973a7fc 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -3211,7 +3211,7 @@ static const char *shmem_get_link(struct dentry *dentry,
> 	folio = filemap_get_folio(inode->i_mapping, 0);
> 	if (!folio)
> 		return ERR_PTR(-ECHILD);
> -	if (PageHWPoison(folio_page(folio, 0)) ||
> +	if (PageHWPoison(folio_headpage(folio)) ||

This is actually incorrect.  We don't want the head page; we want the
page at index 0 of the file.  It's a subtle but important difference
that will matter later on.

> @@ -3222,7 +3222,7 @@ static const char *shmem_get_link(struct dentry *dentry,
> 			return ERR_PTR(error);
> 		if (!folio)
> 			return ERR_PTR(-ECHILD);
> -		if (PageHWPoison(folio_page(folio, 0))) {
> +		if (PageHWPoison(folio_headpage(folio))) {

Same here.

> +++ b/mm/slab.c
> @@ -1373,7 +1373,7 @@ static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
> 	/* Make the flag visible before any changes to folio->mapping */
> 	smp_wmb();
> 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
> -	if (sk_memalloc_socks() && page_is_pfmemalloc(folio_page(folio, 0)))
> +	if (sk_memalloc_socks() && page_is_pfmemalloc(folio_headpage(folio)))

We should have a folio_is_pfmemalloc().

> @@ -1389,7 +1389,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
>
> 	BUG_ON(!folio_test_slab(folio));
> 	__slab_clear_pfmemalloc(slab);
> -	page_mapcount_reset(folio_page(folio, 0));
> +	page_mapcount_reset(folio_headpage(folio));

This one should be &folio->page.

> @@ -1398,7 +1398,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
> 	if (current->reclaim_state)
> 		current->reclaim_state->reclaimed_slab += 1 << order;
> 	unaccount_slab(slab, order, cachep);
> -	__free_pages(folio_page(folio, 0), order);
> +	__free_pages(folio_headpage(folio), order);

&folio->page.

> @@ -939,9 +939,9 @@ void free_large_kmalloc(struct folio *folio, void *object)
> 	kasan_kfree_large(object);
> 	kmsan_kfree_large(object);
>
> -	mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
> +	mod_lruvec_page_state(folio_headpage(folio), NR_SLAB_UNRECLAIMABLE_B,
> 			      -(PAGE_SIZE << order));

lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, ...

> -	__free_pages(folio_page(folio, 0), order);
> +	__free_pages(folio_headpage(folio), order);

&folio->page.

> +++ b/mm/slub.c
> @@ -1859,7 +1859,7 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
> 	__folio_set_slab(folio);
> 	/* Make the flag visible before any changes to folio->mapping */
> 	smp_wmb();
> -	if (page_is_pfmemalloc(folio_page(folio, 0)))
> +	if (page_is_pfmemalloc(folio_headpage(folio)))

folio_is_pfmemalloc()

> @@ -2066,7 +2066,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
> 	if (current->reclaim_state)
> 		current->reclaim_state->reclaimed_slab += pages;
> 	unaccount_slab(slab, order, s);
> -	__free_pages(folio_page(folio, 0), order);
> +	__free_pages(folio_headpage(folio), order);

&folio->page.
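
For illustration, the folio_is_pfmemalloc() suggested above could be a
one-line analogue of page_is_pfmemalloc(), which tests bit 1 of
lru.next to report whether the allocation came from the pfmemalloc
reserves.  A minimal sketch, not part of this series:

	/*
	 * Sketch of a folio-native pfmemalloc test, mirroring
	 * page_is_pfmemalloc().  Testing the bit on the folio itself
	 * lets callers avoid converting back to a struct page.
	 */
	static inline bool folio_is_pfmemalloc(const struct folio *folio)
	{
		return (uintptr_t)folio->lru.next & BIT(1);
	}

The pfmemalloc checks in kmem_getpages() and alloc_slab_page() would
then call folio_is_pfmemalloc(folio) directly, with no struct page in
sight.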
diff --git a/mm/shmem.c b/mm/shmem.c
index bc5c156ef470..8ae73973a7fc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3211,7 +3211,7 @@ static const char *shmem_get_link(struct dentry *dentry,
 	folio = filemap_get_folio(inode->i_mapping, 0);
 	if (!folio)
 		return ERR_PTR(-ECHILD);
-	if (PageHWPoison(folio_page(folio, 0)) ||
+	if (PageHWPoison(folio_headpage(folio)) ||
 	    !folio_test_uptodate(folio)) {
 		folio_put(folio);
 		return ERR_PTR(-ECHILD);
@@ -3222,7 +3222,7 @@ static const char *shmem_get_link(struct dentry *dentry,
 			return ERR_PTR(error);
 		if (!folio)
 			return ERR_PTR(-ECHILD);
-		if (PageHWPoison(folio_page(folio, 0))) {
+		if (PageHWPoison(folio_headpage(folio))) {
 			folio_unlock(folio);
 			folio_put(folio);
 			return ERR_PTR(-ECHILD);
diff --git a/mm/slab.c b/mm/slab.c
index 7a269db050ee..a6f8f95678c9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1373,7 +1373,7 @@ static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 	/* Make the flag visible before any changes to folio->mapping */
 	smp_wmb();
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
-	if (sk_memalloc_socks() && page_is_pfmemalloc(folio_page(folio, 0)))
+	if (sk_memalloc_socks() && page_is_pfmemalloc(folio_headpage(folio)))
 		slab_set_pfmemalloc(slab);

 	return slab;
@@ -1389,7 +1389,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)

 	BUG_ON(!folio_test_slab(folio));
 	__slab_clear_pfmemalloc(slab);
-	page_mapcount_reset(folio_page(folio, 0));
+	page_mapcount_reset(folio_headpage(folio));
 	folio->mapping = NULL;
 	/* Make the mapping reset visible before clearing the flag */
 	smp_wmb();
@@ -1398,7 +1398,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
 	unaccount_slab(slab, order, cachep);
-	__free_pages(folio_page(folio, 0), order);
+	__free_pages(folio_headpage(folio), order);
 }

 static void kmem_rcu_free(struct rcu_head *head)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index bf4e777cfe90..34a0b9988d12 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -939,9 +939,9 @@ void free_large_kmalloc(struct folio *folio, void *object)
 	kasan_kfree_large(object);
 	kmsan_kfree_large(object);

-	mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
+	mod_lruvec_page_state(folio_headpage(folio), NR_SLAB_UNRECLAIMABLE_B,
 			      -(PAGE_SIZE << order));
-	__free_pages(folio_page(folio, 0), order);
+	__free_pages(folio_headpage(folio), order);
 }

 static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
diff --git a/mm/slub.c b/mm/slub.c
index 13459c69095a..1f0cbb4c2288 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1859,7 +1859,7 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
 	__folio_set_slab(folio);
 	/* Make the flag visible before any changes to folio->mapping */
 	smp_wmb();
-	if (page_is_pfmemalloc(folio_page(folio, 0)))
+	if (page_is_pfmemalloc(folio_headpage(folio)))
 		slab_set_pfmemalloc(slab);

 	return slab;
@@ -2066,7 +2066,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	unaccount_slab(slab, order, s);
-	__free_pages(folio_page(folio, 0), order);
+	__free_pages(folio_headpage(folio), order);
 }

 static void rcu_free_slab(struct rcu_head *h)
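
Folding the review feedback into the mm/slab_common.c hunk above, the
end of free_large_kmalloc() would plausibly look something like this --
a sketch under the reviewer's suggestions, not the committed code:

	void free_large_kmalloc(struct folio *folio, void *object)
	{
		unsigned int order = folio_order(folio);

		/* ... earlier checks and KASAN/KMSAN hooks as in the hunk ... */
		kasan_kfree_large(object);
		kmsan_kfree_large(object);

		/* Update the slab counter on the folio directly. */
		lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
				      -(PAGE_SIZE << order));
		/* Where a struct page really is needed, spell it out. */
		__free_pages(&folio->page, order);
	}

lruvec_stat_mod_folio() already exists and takes the folio itself, so
the stats update needs no head-page lookup at all.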
Several places in mm use 'folio_page(folio, 0)' to get the head page of
a folio.  That is not the standard idiom, and it is inefficient.
Replace those calls with 'folio_headpage()'.

Signed-off-by: SeongJae Park <sj@kernel.org>
---
 mm/shmem.c       | 4 ++--
 mm/slab.c        | 6 +++---
 mm/slab_common.c | 4 ++--
 mm/slub.c        | 4 ++--
 4 files changed, 9 insertions(+), 9 deletions(-)
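
The folio_headpage() definition itself is introduced in an earlier
patch of this series and is not shown in this message.  Given the
description above, presumably it avoids the nth_page() arithmetic that
folio_page(folio, 0) performs; a hypothetical spelling, consistent with
the review's "&folio->page" comments, would be:

	/*
	 * Hypothetical sketch only -- the real definition is in an
	 * earlier patch of the series, not in this message.  The head
	 * page of a folio is the folio's embedded struct page, so no
	 * pointer arithmetic is required.
	 */
	#define folio_headpage(folio)	(&(folio)->page)

Note that the review above still objects to several of the
conversions: one call site wants the page at a given file index rather
than the head page, and others are better served by folio-native
helpers such as the suggested folio_is_pfmemalloc() or the existing
lruvec_stat_mod_folio().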