
[4/5] mm/khugepaged: Convert alloc_charge_hpage() to use folios

Message ID: 20231016200510.7387-5-vishal.moola@gmail.com
State: New
Series: Some khugepaged folio conversions

Commit Message

Vishal Moola Oct. 16, 2023, 8:05 p.m. UTC
Also convert hpage_collapse_alloc_page() to
hpage_collapse_alloc_folio(). The explicit folio_prep_large_rmappable()
call goes away because __folio_alloc() already does that for large
folios.

This removes 1 call to compound_head() and helps convert khugepaged to
use folios throughout.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 mm/khugepaged.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
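
For quick reference, here is hpage_collapse_alloc_folio() as it reads
with this patch applied, reconstructed from the diff below:

static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
				      nodemask_t *nmask)
{
	/*
	 * No explicit folio_prep_large_rmappable() needed any more:
	 * __folio_alloc() handles that for large folios.
	 */
	*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);

	if (unlikely(!*folio)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return false;
	}

	count_vm_event(THP_COLLAPSE_ALLOC);
	return true;
}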

Comments

Kefeng Wang Oct. 17, 2023, 3:48 a.m. UTC | #1
On 2023/10/17 4:05, Vishal Moola (Oracle) wrote:
> Also convert hpage_collapse_alloc_page() to
> hpage_collapse_alloc_folio().
> 
> This removes 1 call to compound_head() and helps convert khugepaged to
> use folios throughout.
> 
> Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
> ---
>   mm/khugepaged.c | 13 +++++++------
>   1 file changed, 7 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index fa21a53ce0c0..70bba8ddea13 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -886,16 +886,16 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
>   }
>   #endif
>   
> -static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
> +static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
>   				      nodemask_t *nmask)
>   {
> -	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
> -	if (unlikely(!*hpage)) {
> +	*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
> +
> +	if (unlikely(!*folio)) {
>   		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
>   		return false;
>   	}
>   
> -	folio_prep_large_rmappable((struct folio *)*hpage);
>   	count_vm_event(THP_COLLAPSE_ALLOC);
>   	return true;
>   }
> @@ -1062,15 +1062,16 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
>   	int node = hpage_collapse_find_target_node(cc);
>   	struct folio *folio;
>   
> -	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
> +	if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask))
>   		return SCAN_ALLOC_HUGE_PAGE_FAIL;
>   
> -	folio = page_folio(*hpage);
>   	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
>   		folio_put(folio);
>   		*hpage = NULL;
>   		return SCAN_CGROUP_CHARGE_FAIL;
>   	}
> +
> +	*hpage = folio_page(folio, 0);
>   	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);

count_memcg_folio_events() and kill count_memcg_page_event()?
>   
>   	return SCAN_SUCCEED;
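
A minimal sketch of Kefeng's suggestion, assuming the
count_memcg_folio_events(folio, event, nr) helper from
include/linux/memcontrol.h; placement here is illustrative, not the
posted patch:

	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		*hpage = NULL;
		return SCAN_CGROUP_CHARGE_FAIL;
	}

	/*
	 * Count against the folio directly, making the page-based
	 * wrapper unnecessary. Assumes count_memcg_folio_events() as
	 * suggested, with nr = 1 since THP_COLLAPSE_ALLOC counts
	 * collapses, not base pages.
	 */
	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
	*hpage = folio_page(folio, 0);

	return SCAN_SUCCEED;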
Vishal Moola Oct. 18, 2023, 5:21 p.m. UTC | #2
On Mon, Oct 16, 2023 at 8:48 PM Kefeng Wang <wangkefeng.wang@huawei.com> wrote:
>
> [...]
>
> > +
> > +     *hpage = folio_page(folio, 0);
> >       count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
>
> count_memcg_folio_events() and kill count_memcg_page_event()?

Thanks, I didn't notice that was the last caller.
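
Killing count_memcg_page_event() would then just delete the now-unused
wrapper from include/linux/memcontrol.h. From memory of this era's tree
(treat as a sketch; the !CONFIG_MEMCG stub would go too):

/* Wrapper that becomes dead once khugepaged stops using it (sketch). */
static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}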

Patch

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index fa21a53ce0c0..70bba8ddea13 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -886,16 +886,16 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
 }
 #endif
 
-static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
+static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
 				      nodemask_t *nmask)
 {
-	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
-	if (unlikely(!*hpage)) {
+	*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
+
+	if (unlikely(!*folio)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		return false;
 	}
 
-	folio_prep_large_rmappable((struct folio *)*hpage);
 	count_vm_event(THP_COLLAPSE_ALLOC);
 	return true;
 }
@@ -1062,15 +1062,16 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
 	int node = hpage_collapse_find_target_node(cc);
 	struct folio *folio;
 
-	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
+	if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask))
 		return SCAN_ALLOC_HUGE_PAGE_FAIL;
 
-	folio = page_folio(*hpage);
 	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
 		folio_put(folio);
 		*hpage = NULL;
 		return SCAN_CGROUP_CHARGE_FAIL;
 	}
+
+	*hpage = folio_page(folio, 0);
 	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
 
 	return SCAN_SUCCEED;
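
Likewise, alloc_charge_hpage() with the patch applied, reconstructed
from the hunk above (the gfp selection line is quoted from memory of
this era's tree, not from the diff):

static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE);
	int node = hpage_collapse_find_target_node(cc);
	struct folio *folio;

	if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask))
		return SCAN_ALLOC_HUGE_PAGE_FAIL;

	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		*hpage = NULL;	/* caller sees no page on failure */
		return SCAN_CGROUP_CHARGE_FAIL;
	}

	/* Hand the head page back to callers that still deal in pages. */
	*hpage = folio_page(folio, 0);
	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);

	return SCAN_SUCCEED;
}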