Message ID | 20230612093514.689846-1-tsahu@linux.ibm.com
---|---
State | New
Series | [mm-unstable] mm/folio: Replace set_compound_order with folio_set_order
On 6/12/23 2:35 AM, Tarun Sahu wrote:
> The patch [1] removed the need for special handling of order = 0
> in folio_set_order. Now folio_set_order and set_compound_order are
> similar functions. This patch removes set_compound_order and uses
> folio_set_order instead.
>
> [1] https://lore.kernel.org/all/20230609183032.13E08C433D2@smtp.kernel.org/
>
> Signed-off-by: Tarun Sahu <tsahu@linux.ibm.com>
> ---
> Moved folio_set_order to the top instead of moving prep_compound_head
> below it, so that git blame shows the real change in prep_compound_head:
> the replacement of set_compound_order with folio_set_order.
>
>  include/linux/mm.h | 10 ----------
>  mm/internal.h      | 32 ++++++++++++++++----------------
>  2 files changed, 16 insertions(+), 26 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 27ce77080c79..61d75e0e5b40 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1229,16 +1229,6 @@ static inline void folio_set_compound_dtor(struct folio *folio,
>
>  void destroy_large_folio(struct folio *folio);
>
> -static inline void set_compound_order(struct page *page, unsigned int order)
> -{
> -	struct folio *folio = (struct folio *)page;
> -
> -	folio->_folio_order = order;
> -#ifdef CONFIG_64BIT
> -	folio->_folio_nr_pages = 1U << order;
> -#endif
> -}
> -
>  /* Returns the number of bytes in this potentially compound page. */
>  static inline unsigned long page_size(struct page *page)
>  {
> diff --git a/mm/internal.h b/mm/internal.h
> index c59fe08c5b39..c460b2fde977 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -378,12 +378,27 @@ extern void memblock_free_pages(struct page *page, unsigned long pfn,
>  			unsigned int order);
>  extern void __free_pages_core(struct page *page, unsigned int order);
>
> +/*
> + * This will have no effect, other than possibly generating a warning, if the
> + * caller passes in a non-large folio.
> + */
> +static inline void folio_set_order(struct folio *folio, unsigned int order)
> +{
> +	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
> +		return;
> +
> +	folio->_folio_order = order;
> +#ifdef CONFIG_64BIT
> +	folio->_folio_nr_pages = 1U << order;
> +#endif
> +}
> +
>  static inline void prep_compound_head(struct page *page, unsigned int order)
>  {
>  	struct folio *folio = (struct folio *)page;
>
>  	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
> -	set_compound_order(page, order);
> +	folio_set_order(folio, order);
>  	atomic_set(&folio->_entire_mapcount, -1);
>  	atomic_set(&folio->_nr_pages_mapped, 0);
>  	atomic_set(&folio->_pincount, 0);
> @@ -419,21 +434,6 @@ extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
>  int split_free_page(struct page *free_page,
>  			unsigned int order, unsigned long split_pfn_offset);
>
> -/*
> - * This will have no effect, other than possibly generating a warning, if the
> - * caller passes in a non-large folio.
> - */
> -static inline void folio_set_order(struct folio *folio, unsigned int order)
> -{
> -	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
> -		return;
> -
> -	folio->_folio_order = order;
> -#ifdef CONFIG_64BIT
> -	folio->_folio_nr_pages = 1U << order;
> -#endif
> -}
> -
>  #if defined CONFIG_COMPACTION || defined CONFIG_CMA
>
>  /*

Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
> On Jun 12, 2023, at 17:35, Tarun Sahu <tsahu@linux.ibm.com> wrote:
>
> The patch [1] removed the need for special handling of order = 0
> in folio_set_order. Now folio_set_order and set_compound_order are
> similar functions. This patch removes set_compound_order and uses
> folio_set_order instead.
>
> [1] https://lore.kernel.org/all/20230609183032.13E08C433D2@smtp.kernel.org/
>
> Signed-off-by: Tarun Sahu <tsahu@linux.ibm.com>

Reviewed-by: Muchun Song <songmuchun@bytedance.com>

Thanks.
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 27ce77080c79..61d75e0e5b40 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1229,16 +1229,6 @@ static inline void folio_set_compound_dtor(struct folio *folio,

 void destroy_large_folio(struct folio *folio);

-static inline void set_compound_order(struct page *page, unsigned int order)
-{
-	struct folio *folio = (struct folio *)page;
-
-	folio->_folio_order = order;
-#ifdef CONFIG_64BIT
-	folio->_folio_nr_pages = 1U << order;
-#endif
-}
-
 /* Returns the number of bytes in this potentially compound page. */
 static inline unsigned long page_size(struct page *page)
 {
diff --git a/mm/internal.h b/mm/internal.h
index c59fe08c5b39..c460b2fde977 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -378,12 +378,27 @@ extern void memblock_free_pages(struct page *page, unsigned long pfn,
 			unsigned int order);
 extern void __free_pages_core(struct page *page, unsigned int order);

+/*
+ * This will have no effect, other than possibly generating a warning, if the
+ * caller passes in a non-large folio.
+ */
+static inline void folio_set_order(struct folio *folio, unsigned int order)
+{
+	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
+		return;
+
+	folio->_folio_order = order;
+#ifdef CONFIG_64BIT
+	folio->_folio_nr_pages = 1U << order;
+#endif
+}
+
 static inline void prep_compound_head(struct page *page, unsigned int order)
 {
 	struct folio *folio = (struct folio *)page;

 	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
-	set_compound_order(page, order);
+	folio_set_order(folio, order);
 	atomic_set(&folio->_entire_mapcount, -1);
 	atomic_set(&folio->_nr_pages_mapped, 0);
 	atomic_set(&folio->_pincount, 0);
@@ -419,21 +434,6 @@ extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
 int split_free_page(struct page *free_page,
 			unsigned int order, unsigned long split_pfn_offset);

-/*
- * This will have no effect, other than possibly generating a warning, if the
- * caller passes in a non-large folio.
- */
-static inline void folio_set_order(struct folio *folio, unsigned int order)
-{
-	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
-		return;
-
-	folio->_folio_order = order;
-#ifdef CONFIG_64BIT
-	folio->_folio_nr_pages = 1U << order;
-#endif
-}
-
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA

 /*
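For context on the fields this diff writes: they are consumed by the read-side
helpers folio_order() and folio_nr_pages() in include/linux/mm.h. The sketch
below approximates those readers from roughly the same kernel era (it is
background, not part of this patch, and details may differ from the exact
tree). It shows why the WARN_ON_ONCE guard matters: _folio_order and
_folio_nr_pages live in a tail page of a large folio, so an order-0
(non-large) folio has no storage for them, and the readers fall back to
constants instead.

/* Approximate contemporaneous readers of the fields set by folio_set_order() */
static inline unsigned int folio_order(struct folio *folio)
{
	if (!folio_test_large(folio))
		return 0;		/* order-0 folio: no tail page to read */
	return folio->_folio_order;
}

static inline long folio_nr_pages(struct folio *folio)
{
	if (!folio_test_large(folio))
		return 1;
#ifdef CONFIG_64BIT
	return folio->_folio_nr_pages;	/* cached copy of 1 << order */
#else
	return 1L << folio->_folio_order;
#endif
}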
The patch [1] removed the need for special handling of order = 0
in folio_set_order. Now folio_set_order and set_compound_order are
similar functions. This patch removes set_compound_order and uses
folio_set_order instead.

[1] https://lore.kernel.org/all/20230609183032.13E08C433D2@smtp.kernel.org/

Signed-off-by: Tarun Sahu <tsahu@linux.ibm.com>
---
Moved folio_set_order to the top instead of moving prep_compound_head
below it, so that git blame shows the real change in prep_compound_head:
the replacement of set_compound_order with folio_set_order.

 include/linux/mm.h | 10 ----------
 mm/internal.h      | 32 ++++++++++++++++----------------
 2 files changed, 16 insertions(+), 26 deletions(-)
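To make the consolidated behavior concrete, here is a minimal userspace model
of folio_set_order() after this patch. All names here (folio_model,
model_set_order) are hypothetical stand-ins, not kernel code: order 0 and
non-large folios are rejected by the guard, and nr_pages stays in sync as
1U << order, mirroring the CONFIG_64BIT branch. The explicit rejection of
order = 0 is what patch [1] made possible by removing the old special case.

#include <stdio.h>
#include <stdbool.h>

struct folio_model {
	bool large;		/* stand-in for folio_test_large() */
	unsigned int order;	/* stand-in for _folio_order */
	unsigned int nr_pages;	/* stand-in for _folio_nr_pages */
};

static void model_set_order(struct folio_model *folio, unsigned int order)
{
	/* Mirrors WARN_ON_ONCE(!order || !folio_test_large(folio)) */
	if (!order || !folio->large) {
		fprintf(stderr, "warning: ignoring order=%u for %s folio\n",
			order, folio->large ? "large" : "non-large");
		return;
	}
	folio->order = order;
	folio->nr_pages = 1U << order;	/* kept in sync with the order */
}

int main(void)
{
	struct folio_model f = { .large = true, .order = 0, .nr_pages = 0 };

	model_set_order(&f, 9);		/* e.g. a 2MiB folio of 4KiB pages */
	printf("order=%u nr_pages=%u\n", f.order, f.nr_pages);	/* 9, 512 */

	model_set_order(&f, 0);		/* rejected; state is unchanged */
	printf("order=%u nr_pages=%u\n", f.order, f.nr_pages);	/* 9, 512 */
	return 0;
}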