
[4/4] mm: migrate: change to return the number of pages migrated successfully

Message ID ee51fd4effeb8c398566a766ada2da475320b400.1692440586.git.baolin.wang@linux.alibaba.com
State New
Series Extend migrate_misplaced_page() to support batch migration

Commit Message

Baolin Wang Aug. 19, 2023, 10:52 a.m. UTC
Change migrate_misplaced_page() to return the number of pages migrated
successfully, which will be used to calculate how many pages failed to
migrate for batch migration.

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 mm/huge_memory.c | 7 ++++---
 mm/memory.c      | 5 +++--
 mm/migrate.c     | 5 +----
 3 files changed, 8 insertions(+), 9 deletions(-)
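
With a count returned instead of a boolean, a batch caller can derive the
number of failed pages directly from the return value. Below is a minimal
sketch of such a caller; numa_migrate_batch() is a hypothetical helper that
is not part of this series, and only migrate_misplaced_page() and
TNF_MIGRATED come from the kernel:

	/*
	 * Hypothetical illustration only (not in this series): migrate a
	 * batch of isolated pages and report how many failed, which the
	 * old boolean return value could not express.
	 */
	static int numa_migrate_batch(struct list_head *migratepages,
				      struct vm_area_struct *vma,
				      int source_nid, int target_nid,
				      int nr_pages, int *flags)
	{
		int nr_succeeded;

		nr_succeeded = migrate_misplaced_page(migratepages, vma,
						      source_nid, target_nid);
		if (nr_succeeded)
			*flags |= TNF_MIGRATED;

		/* Pages that did not migrate were put back by the callee. */
		return nr_pages - nr_succeeded;
	}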

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 53a9d63cfb1e..a9c454160984 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1494,11 +1494,12 @@  vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	int page_nid = NUMA_NO_NODE;
 	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
-	bool migrated = false, writable = false;
+	bool writable = false;
 	int flags = 0;
 	pg_data_t *pgdat;
 	int isolated;
 	LIST_HEAD(migratepages);
+	int nr_succeeded = 0;
 
 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
 	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
@@ -1551,8 +1552,8 @@  vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	}
 
 	list_add(&page->lru, &migratepages);
-	migrated = migrate_misplaced_page(&migratepages, vma, page_nid, target_nid);
-	if (migrated) {
+	nr_succeeded = migrate_misplaced_page(&migratepages, vma, page_nid, target_nid);
+	if (nr_succeeded) {
 		flags |= TNF_MIGRATED;
 		page_nid = target_nid;
 	} else {
diff --git a/mm/memory.c b/mm/memory.c
index 973403a83797..edfd2d528e7e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4780,7 +4780,7 @@  static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	pte_t pte, old_pte;
 	int flags = 0;
 	pg_data_t *pgdat;
-	int isolated;
+	int isolated, nr_succeeded;
 	LIST_HEAD(migratepages);
 
 	/*
@@ -4861,7 +4861,8 @@  static vm_fault_t do_numa_page(struct vm_fault *vmf)
 
 	list_add(&page->lru, &migratepages);
 	/* Migrate to the requested node */
-	if (migrate_misplaced_page(&migratepages, vma, page_nid, target_nid)) {
+	nr_succeeded = migrate_misplaced_page(&migratepages, vma, page_nid, target_nid);
+	if (nr_succeeded) {
 		page_nid = target_nid;
 		flags |= TNF_MIGRATED;
 	} else {
diff --git a/mm/migrate.c b/mm/migrate.c
index 93d359471b95..45f92376ba6f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2523,7 +2523,6 @@  int migrate_misplaced_page(struct list_head *migratepages, struct vm_area_struct
 			   int source_nid, int target_nid)
 {
 	pg_data_t *pgdat = NODE_DATA(target_nid);
-	int migrated = 1;
 	int nr_remaining;
 	unsigned int nr_succeeded;
 
@@ -2533,8 +2532,6 @@  int migrate_misplaced_page(struct list_head *migratepages, struct vm_area_struct
 	if (nr_remaining) {
 		if (!list_empty(migratepages))
 			putback_movable_pages(migratepages);
-
-		migrated = 0;
 	}
 	if (nr_succeeded) {
 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
@@ -2543,7 +2540,7 @@  int migrate_misplaced_page(struct list_head *migratepages, struct vm_area_struct
 					    nr_succeeded);
 	}
 	BUG_ON(!list_empty(migratepages));
-	return migrated;
+	return nr_succeeded;
 }
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_NUMA */
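
After this change, a return value of 0 still means total failure, so the
boolean-style checks in do_huge_pmd_numa_page() and do_numa_page() keep
working unchanged, while a batch caller can additionally detect partial
success. A sketch of the resulting contract, where nr_batched is a
hypothetical count of the pages placed on the list:

	nr_succeeded = migrate_misplaced_page(&migratepages, vma,
					      page_nid, target_nid);
	if (nr_succeeded == nr_batched) {
		/* every page reached target_nid */
	} else if (nr_succeeded) {
		/*
		 * Partial success: nr_batched - nr_succeeded pages failed
		 * and were put back by migrate_misplaced_page().
		 */
	} else {
		/* nothing migrated; all pages were put back */
	}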