| Message ID | 20240711072929.3590000-2-ryan.roberts@arm.com |
|---|---|
| State | New |
| Series | mTHP allocation stats for file-backed memory |
On Thu, Jul 11, 2024 at 7:29 PM Ryan Roberts <ryan.roberts@arm.com> wrote:
>
> Let's move count_mthp_stat() so that it's always defined, even when THP
> is disabled. Previously, uses of the function in files such as shmem.c,
> which are compiled even when THP is disabled, required ugly THP
> ifdeffery. With this cleanup, we can remove those ifdefs and the
> function resolves to a nop when THP is disabled.
>
> I shortly plan to call count_mthp_stat() from more THP-invariant source
> files.
>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>

Acked-by: Barry Song <baohua@kernel.org>

> ---
>  include/linux/huge_mm.h | 70 ++++++++++++++++++++---------------------
>  mm/memory.c             |  2 --
>  mm/shmem.c              |  6 ----
>  3 files changed, 35 insertions(+), 43 deletions(-)

[...]
On 2024/7/11 15:29, Ryan Roberts wrote:
> Let's move count_mthp_stat() so that it's always defined, even when THP
> is disabled. Previously, uses of the function in files such as shmem.c,
> which are compiled even when THP is disabled, required ugly THP
> ifdeffery. With this cleanup, we can remove those ifdefs and the
> function resolves to a nop when THP is disabled.
>
> I shortly plan to call count_mthp_stat() from more THP-invariant source
> files.
>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>

LGTM.

Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>

> ---
>  include/linux/huge_mm.h | 70 ++++++++++++++++++++---------------------
>  mm/memory.c             |  2 --
>  mm/shmem.c              |  6 ----
>  3 files changed, 35 insertions(+), 43 deletions(-)

[...]
On Thu, Jul 11, 2024 at 3:29 PM Ryan Roberts <ryan.roberts@arm.com> wrote:
>
> Let's move count_mthp_stat() so that it's always defined, even when THP
> is disabled. Previously, uses of the function in files such as shmem.c,
> which are compiled even when THP is disabled, required ugly THP
> ifdeffery. With this cleanup, we can remove those ifdefs and the
> function resolves to a nop when THP is disabled.
>
> I shortly plan to call count_mthp_stat() from more THP-invariant source
> files.
>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>

LGTM. Feel free to add:

Reviewed-by: Lance Yang <ioworker0@gmail.com>

Thanks,
Lance

> ---
>  include/linux/huge_mm.h | 70 ++++++++++++++++++++---------------------
>  mm/memory.c             |  2 --
>  mm/shmem.c              |  6 ----
>  3 files changed, 35 insertions(+), 43 deletions(-)

[...]
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cff002be83eb..cb93b9009ce4 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -108,6 +108,41 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
 #define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
 #define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
 
+enum mthp_stat_item {
+	MTHP_STAT_ANON_FAULT_ALLOC,
+	MTHP_STAT_ANON_FAULT_FALLBACK,
+	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+	MTHP_STAT_SWPOUT,
+	MTHP_STAT_SWPOUT_FALLBACK,
+	MTHP_STAT_SHMEM_ALLOC,
+	MTHP_STAT_SHMEM_FALLBACK,
+	MTHP_STAT_SHMEM_FALLBACK_CHARGE,
+	MTHP_STAT_SPLIT,
+	MTHP_STAT_SPLIT_FAILED,
+	MTHP_STAT_SPLIT_DEFERRED,
+	__MTHP_STAT_COUNT
+};
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
+struct mthp_stat {
+	unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
+};
+
+DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+	if (order <= 0 || order > PMD_ORDER)
+		return;
+
+	this_cpu_inc(mthp_stats.stats[order][item]);
+}
+#else
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+}
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
 extern unsigned long transparent_hugepage_flags;
@@ -263,41 +298,6 @@ struct thpsize {
 
 #define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
 
-enum mthp_stat_item {
-	MTHP_STAT_ANON_FAULT_ALLOC,
-	MTHP_STAT_ANON_FAULT_FALLBACK,
-	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
-	MTHP_STAT_SWPOUT,
-	MTHP_STAT_SWPOUT_FALLBACK,
-	MTHP_STAT_SHMEM_ALLOC,
-	MTHP_STAT_SHMEM_FALLBACK,
-	MTHP_STAT_SHMEM_FALLBACK_CHARGE,
-	MTHP_STAT_SPLIT,
-	MTHP_STAT_SPLIT_FAILED,
-	MTHP_STAT_SPLIT_DEFERRED,
-	__MTHP_STAT_COUNT
-};
-
-struct mthp_stat {
-	unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
-};
-
-#ifdef CONFIG_SYSFS
-DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
-
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
-{
-	if (order <= 0 || order > PMD_ORDER)
-		return;
-
-	this_cpu_inc(mthp_stats.stats[order][item]);
-}
-#else
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
-{
-}
-#endif
-
 #define transparent_hugepage_use_zero_page()				\
 	(transparent_hugepage_flags &					\
 	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
diff --git a/mm/memory.c b/mm/memory.c
index 802d0d8a40f9..a50fdefb8f0b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4597,9 +4597,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 
 	folio_ref_add(folio, nr_pages - 1);
 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
-#endif
 	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
 	folio_add_lru_vma(folio, vma);
 setpte:
diff --git a/mm/shmem.c b/mm/shmem.c
index f24dfbd387ba..fce1343f44e6 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1776,9 +1776,7 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
 
 			if (pages == HPAGE_PMD_NR)
 				count_vm_event(THP_FILE_FALLBACK);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 			count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
-#endif
 			order = next_order(&suitable_orders, order);
 		}
 	} else {
@@ -1803,10 +1801,8 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
 				count_vm_event(THP_FILE_FALLBACK);
 				count_vm_event(THP_FILE_FALLBACK_CHARGE);
 			}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
 			count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
-#endif
 		}
 		goto unlock;
 	}
@@ -2180,9 +2176,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 	if (!IS_ERR(folio)) {
 		if (folio_test_pmd_mappable(folio))
 			count_vm_event(THP_FILE_ALLOC);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 		count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
-#endif
 		goto alloced;
 	}
 	if (PTR_ERR(folio) == -EEXIST)
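The per-CPU counters added above are only half the story: whoever reports them (e.g. the sysfs interface this series feeds) must fold the per-CPU values into a single total. The kernel's actual helper for that lives elsewhere in mm/; the following is only a minimal sketch of the aggregation, assuming the standard `for_each_possible_cpu()`/`per_cpu()` accessors, with the function name chosen for illustration:

```c
/* Illustrative sketch only: sum one mTHP counter across all CPUs.
 * Relies on the mthp_stats per-CPU variable declared in huge_mm.h;
 * the name sum_mthp_stat() is assumed here, not taken from this patch.
 */
static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mthp_stat *this = &per_cpu(mthp_stats, cpu);

		sum += this->stats[order][item];
	}

	return sum;
}
```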
Let's move count_mthp_stat() so that it's always defined, even when THP
is disabled. Previously, uses of the function in files such as shmem.c,
which are compiled even when THP is disabled, required ugly THP
ifdeffery. With this cleanup, we can remove those ifdefs and the
function resolves to a nop when THP is disabled.

I shortly plan to call count_mthp_stat() from more THP-invariant source
files.

Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 include/linux/huge_mm.h | 70 ++++++++++++++++++++---------------------
 mm/memory.c             |  2 --
 mm/shmem.c              |  6 ----
 3 files changed, 35 insertions(+), 43 deletions(-)
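The cleanup follows a common kernel pattern: declare the enum unconditionally, compile the real function body only when the feature is enabled, and provide an empty static inline stub otherwise. Callers then need no ifdefs, and the compiler discards the call entirely in the disabled configuration. A stripped-down sketch of the pattern, using a made-up CONFIG_FOO feature rather than anything from this patch:

```c
/* Generic shape of the "always-defined stub" pattern used above.
 * CONFIG_FOO, foo_account() and foo_events are hypothetical.
 */
enum foo_event { FOO_ALLOC, FOO_FREE, __FOO_COUNT };

#ifdef CONFIG_FOO
extern unsigned long foo_events[__FOO_COUNT];

static inline void foo_account(enum foo_event ev)
{
	foo_events[ev]++;	/* real accounting when the feature is on */
}
#else
static inline void foo_account(enum foo_event ev)
{
	/* nop: calls compile away entirely when CONFIG_FOO is off */
}
#endif
```

Because the stub is a static inline rather than a macro, callers keep full type checking of their arguments in both configurations.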