diff mbox series

[v2,4/8] mm: migrate: use a folio in migrate_misplaced_page()

Message ID 20230821115624.158759-5-wangkefeng.wang@huawei.com (mailing list archive)
State New
Headers show
Series mm: migrate: more folio conversion and unify | expand

Commit Message

Kefeng Wang Aug. 21, 2023, 11:56 a.m. UTC
Use a folio in migrate_misplaced_page() to save compound_head() calls.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/migrate.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

Comments

Zi Yan Aug. 28, 2023, 2:10 p.m. UTC | #1
On 21 Aug 2023, at 7:56, Kefeng Wang wrote:

> Use a folio in migrate_misplaced_page() to save compound_head() calls.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  mm/migrate.c | 23 ++++++++++++-----------
>  1 file changed, 12 insertions(+), 11 deletions(-)

LGTM. And a comment below. Reviewed-by: Zi Yan <ziy@nvidia.com>

>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 281eafdf8e63..fc728f9a383f 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -2521,17 +2521,18 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
>  			   int node)
>  {
>  	pg_data_t *pgdat = NODE_DATA(node);
> +	struct folio *folio = page_folio(page);
>  	int isolated;
>  	int nr_remaining;
>  	unsigned int nr_succeeded;
>  	LIST_HEAD(migratepages);
> -	int nr_pages = thp_nr_pages(page);
> +	int nr_pages = folio_nr_pages(folio);
>
>  	/*
>  	 * Don't migrate file pages that are mapped in multiple processes
>  	 * with execute permissions as they are probably shared libraries.
>  	 */
> -	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
> +	if (page_mapcount(page) != 1 && folio_is_file_lru(folio) &&

page_mapcount() is not converted, since folio_mapcount() is not equivalent
to page_mapcount(). It can be converted and this function can be converted
to migrate_misplaced_folio() once we have something like folio_num_sharers().

>  	    (vma->vm_flags & VM_EXEC))
>  		goto out;
>
> @@ -2539,29 +2540,29 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
>  	 * Also do not migrate dirty pages as not all filesystems can move
>  	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
>  	 */
> -	if (page_is_file_lru(page) && PageDirty(page))
> +	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
>  		goto out;
>
> -	isolated = numamigrate_isolate_folio(pgdat, page_folio(page));
> +	isolated = numamigrate_isolate_folio(pgdat, folio);
>  	if (!isolated)
>  		goto out;
>
> -	list_add(&page->lru, &migratepages);
> +	list_add(&folio->lru, &migratepages);
>  	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
>  				     NULL, node, MIGRATE_ASYNC,
>  				     MR_NUMA_MISPLACED, &nr_succeeded);
>  	if (nr_remaining) {
>  		if (!list_empty(&migratepages)) {
> -			list_del(&page->lru);
> -			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
> -					page_is_file_lru(page), -nr_pages);
> -			putback_lru_page(page);
> +			list_del(&folio->lru);
> +			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
> +					folio_is_file_lru(folio), -nr_pages);
> +			folio_putback_lru(folio);
>  		}
>  		isolated = 0;
>  	}
>  	if (nr_succeeded) {
>  		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
> -		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
> +		if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
>  			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
>  					    nr_succeeded);
>  	}
> @@ -2569,7 +2570,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
>  	return isolated;
>
>  out:
> -	put_page(page);
> +	folio_put(folio);
>  	return 0;
>  }
>  #endif /* CONFIG_NUMA_BALANCING */
> -- 
> 2.41.0


--
Best Regards,
Yan, Zi
Huang, Ying Aug. 29, 2023, 12:49 a.m. UTC | #2
Zi Yan <ziy@nvidia.com> writes:

> On 21 Aug 2023, at 7:56, Kefeng Wang wrote:
>
>> Use a folio in migrate_misplaced_page() to save compound_head() calls.
>>
>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>> ---
>>  mm/migrate.c | 23 ++++++++++++-----------
>>  1 file changed, 12 insertions(+), 11 deletions(-)
>
> LGTM. And a comment below. Reviewed-by: Zi Yan <ziy@nvidia.com>
>
>>
>> diff --git a/mm/migrate.c b/mm/migrate.c
>> index 281eafdf8e63..fc728f9a383f 100644
>> --- a/mm/migrate.c
>> +++ b/mm/migrate.c
>> @@ -2521,17 +2521,18 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
>>  			   int node)
>>  {
>>  	pg_data_t *pgdat = NODE_DATA(node);
>> +	struct folio *folio = page_folio(page);
>>  	int isolated;
>>  	int nr_remaining;
>>  	unsigned int nr_succeeded;
>>  	LIST_HEAD(migratepages);
>> -	int nr_pages = thp_nr_pages(page);
>> +	int nr_pages = folio_nr_pages(folio);
>>
>>  	/*
>>  	 * Don't migrate file pages that are mapped in multiple processes
>>  	 * with execute permissions as they are probably shared libraries.
>>  	 */
>> -	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
>> +	if (page_mapcount(page) != 1 && folio_is_file_lru(folio) &&
>
> page_mapcount() is not converted, since folio_mapcount() is not equivalent
> to page_mapcount(). It can be converted and this function can be converted
> to migrate_misplaced_folio() once we have something like folio_num_sharers().

It seems that we can use folio_estimated_sharers() here.

--
Best Regards,
Huang, Ying

>>  	    (vma->vm_flags & VM_EXEC))
>>  		goto out;
>>
>> @@ -2539,29 +2540,29 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
>>  	 * Also do not migrate dirty pages as not all filesystems can move
>>  	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
>>  	 */
>> -	if (page_is_file_lru(page) && PageDirty(page))
>> +	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
>>  		goto out;
>>
>> -	isolated = numamigrate_isolate_folio(pgdat, page_folio(page));
>> +	isolated = numamigrate_isolate_folio(pgdat, folio);
>>  	if (!isolated)
>>  		goto out;
>>
>> -	list_add(&page->lru, &migratepages);
>> +	list_add(&folio->lru, &migratepages);
>>  	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
>>  				     NULL, node, MIGRATE_ASYNC,
>>  				     MR_NUMA_MISPLACED, &nr_succeeded);
>>  	if (nr_remaining) {
>>  		if (!list_empty(&migratepages)) {
>> -			list_del(&page->lru);
>> -			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
>> -					page_is_file_lru(page), -nr_pages);
>> -			putback_lru_page(page);
>> +			list_del(&folio->lru);
>> +			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
>> +					folio_is_file_lru(folio), -nr_pages);
>> +			folio_putback_lru(folio);
>>  		}
>>  		isolated = 0;
>>  	}
>>  	if (nr_succeeded) {
>>  		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
>> -		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
>> +		if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
>>  			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
>>  					    nr_succeeded);
>>  	}
>> @@ -2569,7 +2570,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
>>  	return isolated;
>>
>>  out:
>> -	put_page(page);
>> +	folio_put(folio);
>>  	return 0;
>>  }
>>  #endif /* CONFIG_NUMA_BALANCING */
>> -- 
>> 2.41.0
>
>
> --
> Best Regards,
> Yan, Zi
Matthew Wilcox Aug. 29, 2023, 2:10 a.m. UTC | #3
On Tue, Aug 29, 2023 at 08:49:31AM +0800, Huang, Ying wrote:
> Zi Yan <ziy@nvidia.com> writes:
> 
> > On 21 Aug 2023, at 7:56, Kefeng Wang wrote:
> >
> >> Use a folio in migrate_misplaced_page() to save compound_head() calls.
> >>
> >> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> >> ---
> >>  mm/migrate.c | 23 ++++++++++++-----------
> >>  1 file changed, 12 insertions(+), 11 deletions(-)
> >
> > LGTM. And a comment below. Reviewed-by: Zi Yan <ziy@nvidia.com>
> >
> >>
> >> diff --git a/mm/migrate.c b/mm/migrate.c
> >> index 281eafdf8e63..fc728f9a383f 100644
> >> --- a/mm/migrate.c
> >> +++ b/mm/migrate.c
> >> @@ -2521,17 +2521,18 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
> >>  			   int node)
> >>  {
> >>  	pg_data_t *pgdat = NODE_DATA(node);
> >> +	struct folio *folio = page_folio(page);
> >>  	int isolated;
> >>  	int nr_remaining;
> >>  	unsigned int nr_succeeded;
> >>  	LIST_HEAD(migratepages);
> >> -	int nr_pages = thp_nr_pages(page);
> >> +	int nr_pages = folio_nr_pages(folio);
> >>
> >>  	/*
> >>  	 * Don't migrate file pages that are mapped in multiple processes
> >>  	 * with execute permissions as they are probably shared libraries.
> >>  	 */
> >> -	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
> >> +	if (page_mapcount(page) != 1 && folio_is_file_lru(folio) &&
> >
> > page_mapcount() is not converted, since folio_mapcount() is not equivalent
> > to page_mapcount(). It can be converted and this function can be converted
> > to migrate_misplaced_folio() once we have something like folio_num_sharers().
> 
> It seems that we can use folio_estimated_sharers() here.

So, funny thing, page_mapcount() was always wrong here.  We have two
callers, do_huge_pmd_numa_page() and do_numa_page().  do_numa_page()
has a check for PageCompound() (and /* TODO: handle PTE-mapped THP */).
do_huge_pmd_numa_page() returns pfn_to_page(), after it got the pfn
from pmd_pfn(pmd).

That makes folio_estimated_sharers() an improvement, possibly even
a bugfix.  Also, we should look at removing the PageCompound() check
in do_numa_page().
Kefeng Wang Aug. 30, 2023, 9:45 a.m. UTC | #4
On 2023/8/29 10:10, Matthew Wilcox wrote:
> On Tue, Aug 29, 2023 at 08:49:31AM +0800, Huang, Ying wrote:
>> Zi Yan <ziy@nvidia.com> writes:
>>
>>> On 21 Aug 2023, at 7:56, Kefeng Wang wrote:
>>>
>>>> Use a folio in migrate_misplaced_page() to save compound_head() calls.
>>>>
>>>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>>>> ---
>>>>   mm/migrate.c | 23 ++++++++++++-----------
>>>>   1 file changed, 12 insertions(+), 11 deletions(-)
>>>
>>> LGTM. And a comment below. Reviewed-by: Zi Yan <ziy@nvidia.com>
>>>
>>>>
>>>> diff --git a/mm/migrate.c b/mm/migrate.c
>>>> index 281eafdf8e63..fc728f9a383f 100644
>>>> --- a/mm/migrate.c
>>>> +++ b/mm/migrate.c
>>>> @@ -2521,17 +2521,18 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
>>>>   			   int node)
>>>>   {
>>>>   	pg_data_t *pgdat = NODE_DATA(node);
>>>> +	struct folio *folio = page_folio(page);
>>>>   	int isolated;
>>>>   	int nr_remaining;
>>>>   	unsigned int nr_succeeded;
>>>>   	LIST_HEAD(migratepages);
>>>> -	int nr_pages = thp_nr_pages(page);
>>>> +	int nr_pages = folio_nr_pages(folio);
>>>>
>>>>   	/*
>>>>   	 * Don't migrate file pages that are mapped in multiple processes
>>>>   	 * with execute permissions as they are probably shared libraries.
>>>>   	 */
>>>> -	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
>>>> +	if (page_mapcount(page) != 1 && folio_is_file_lru(folio) &&
>>>
>>> page_mapcount() is not converted, since folio_mapcount() is not equivalent
>>> to page_mapcount(). It can be converted and this function can be converted
>>> to migrate_misplaced_folio() once we have something like folio_num_sharers().
>>
>> It seems that we can use folio_estimated_sharers() here.
> 
> So, funny thing, page_mapcount() was always wrong here.  We have two
> callers, do_huge_pmd_numa_page() and do_numa_page().  do_numa_page()
> has a check for PageCompound() (and /* TODO: handle PTE-mapped THP */).
> do_huge_pmd_numa_page() returns pfn_to_page(), after it got the pfn
> from pmd_pfn(pmd).

For a PMD-mapped page, do_huge_pmd_numa_page() returns the head page:

    unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
    page = vm_normal_page_pmd(vma, haddr, pmd);

do_numa_page() only handles base pages (via the PageCompound() check), so we
could use folio_estimated_sharers(). I am not clear why page_mapcount()
was wrong here — could you explain more? Thanks.

> 
> That makes folio_estimated_sharers() an improvement, possibly even
> a bugfix.  Also, we should look at removing the PageCompound() check
> in do_numa_page().

Yes, for PTE-mapped THP and large folio NUMA migration support, we
need to remove that check; we could do it in another patchset.

> 
>
diff mbox series

Patch

diff --git a/mm/migrate.c b/mm/migrate.c
index 281eafdf8e63..fc728f9a383f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2521,17 +2521,18 @@  int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 			   int node)
 {
 	pg_data_t *pgdat = NODE_DATA(node);
+	struct folio *folio = page_folio(page);
 	int isolated;
 	int nr_remaining;
 	unsigned int nr_succeeded;
 	LIST_HEAD(migratepages);
-	int nr_pages = thp_nr_pages(page);
+	int nr_pages = folio_nr_pages(folio);
 
 	/*
 	 * Don't migrate file pages that are mapped in multiple processes
 	 * with execute permissions as they are probably shared libraries.
 	 */
-	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
+	if (page_mapcount(page) != 1 && folio_is_file_lru(folio) &&
 	    (vma->vm_flags & VM_EXEC))
 		goto out;
 
@@ -2539,29 +2540,29 @@  int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	 * Also do not migrate dirty pages as not all filesystems can move
 	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
 	 */
-	if (page_is_file_lru(page) && PageDirty(page))
+	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
 		goto out;
 
-	isolated = numamigrate_isolate_folio(pgdat, page_folio(page));
+	isolated = numamigrate_isolate_folio(pgdat, folio);
 	if (!isolated)
 		goto out;
 
-	list_add(&page->lru, &migratepages);
+	list_add(&folio->lru, &migratepages);
 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
 				     NULL, node, MIGRATE_ASYNC,
 				     MR_NUMA_MISPLACED, &nr_succeeded);
 	if (nr_remaining) {
 		if (!list_empty(&migratepages)) {
-			list_del(&page->lru);
-			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-					page_is_file_lru(page), -nr_pages);
-			putback_lru_page(page);
+			list_del(&folio->lru);
+			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
+					folio_is_file_lru(folio), -nr_pages);
+			folio_putback_lru(folio);
 		}
 		isolated = 0;
 	}
 	if (nr_succeeded) {
 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
-		if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
+		if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
 			mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
 					    nr_succeeded);
 	}
@@ -2569,7 +2570,7 @@  int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	return isolated;
 
 out:
-	put_page(page);
+	folio_put(folio);
 	return 0;
 }
 #endif /* CONFIG_NUMA_BALANCING */