@@ -588,20 +588,20 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 	 * Copy NUMA information to the new page, to prevent over-eager
 	 * future migrations of this same page.
 	 */
-	cpupid = page_cpupid_xchg_last(&folio->page, -1);
+	cpupid = folio_xchg_last_cpupid(folio, -1);
 	/*
 	 * For memory tiering mode, when migrate between slow and fast
 	 * memory node, reset cpupid, because that is used to record
 	 * page access time in slow memory node.
 	 */
 	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
-		bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
-		bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
+		bool f_toptier = node_is_toptier(folio_nid(folio));
+		bool t_toptier = node_is_toptier(folio_nid(newfolio));
 
 		if (f_toptier != t_toptier)
 			cpupid = -1;
 	}
-	page_cpupid_xchg_last(&newfolio->page, cpupid);
+	folio_xchg_last_cpupid(newfolio, cpupid);
 
 	folio_migrate_ksm(newfolio, folio);
 	/*
Convert to use folio_xchg_last_cpupid() in folio_migrate_flags(), also
directly use folio_nid() instead of page_to_nid(&folio->page).

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/migrate.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
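
For context, the conversion is meant to be purely mechanical: as far as I can
tell, both folio helpers are thin wrappers that operate on the folio's head
page, so no generated code should change, only the calling convention moves
from struct page to struct folio. A minimal sketch of the wrappers,
paraphrasing include/linux/mm.h (the exact definitions may differ with config
options such as CONFIG_NUMA_BALANCING and with later patches in this series):

static inline int folio_nid(const struct folio *folio)
{
	/* A folio's node is the node of its head page. */
	return page_to_nid(&folio->page);
}

static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
	/* Swap in a new last-cpupid value and return the previous one. */
	return page_cpupid_xchg_last(&folio->page, cpupid);
}

With these in place, callers such as folio_migrate_flags() no longer need to
reach into &folio->page by hand.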