[v2,1/5] mm/mglru: Create a new helper iterate_mm_list_walk

Message ID: 20230706062044.816068-2-aneesh.kumar@linux.ibm.com
State: New
Series: Avoid building lrugen page table walk code

Commit Message

Aneesh Kumar K.V July 6, 2023, 6:20 a.m. UTC
In a later patch we will not build this page table walk code on the ppc64
architecture, so move it into a new helper, iterate_mm_list_walk().
No functional change in this patch.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 mm/vmscan.c | 52 ++++++++++++++++++++++++++++++----------------------
 1 file changed, 30 insertions(+), 22 deletions(-)
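
The value of the new helper is that it gives a later patch in the series a single point to compile out. A minimal sketch of what that could look like, assuming a hypothetical Kconfig symbol; this patch itself adds no #ifdef, and the real option name is only introduced later in the series:

#ifdef CONFIG_LRU_GEN_MM_WALK	/* hypothetical symbol, for illustration only */
/* the walk-based implementation added by this patch */
#else
static bool iterate_mm_list_walk(struct lruvec *lruvec, unsigned long max_seq,
				 bool can_swap, bool force_scan)
{
	/* Walk code not built: fall back to aging without a page table walk. */
	return iterate_mm_list_nowalk(lruvec, max_seq);
}
#endif

With a stub like this, try_to_inc_max_seq() keeps the same call site on every architecture and needs no #ifdef of its own.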

Patch

diff --git a/mm/vmscan.c b/mm/vmscan.c
index eb23bb1afc64..3b183f704d5d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4491,12 +4491,37 @@  static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
 	spin_unlock_irq(&lruvec->lru_lock);
 }
 
+static bool iterate_mm_list_walk(struct lruvec *lruvec, unsigned long max_seq,
+				 bool can_swap, bool force_scan)
+{
+	bool success;
+	struct mm_struct *mm = NULL;
+	struct lru_gen_mm_walk *walk;
+
+	walk = set_mm_walk(NULL, true);
+	if (!walk) {
+		success = iterate_mm_list_nowalk(lruvec, max_seq);
+		return success;
+	}
+
+	walk->lruvec = lruvec;
+	walk->max_seq = max_seq;
+	walk->can_swap = can_swap;
+	walk->force_scan = force_scan;
+
+	do {
+		success = iterate_mm_list(lruvec, walk, &mm);
+		if (mm)
+			walk_mm(lruvec, mm, walk);
+	} while (mm);
+
+	return success;
+}
+
 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
 			       struct scan_control *sc, bool can_swap, bool force_scan)
 {
 	bool success;
-	struct lru_gen_mm_walk *walk;
-	struct mm_struct *mm = NULL;
 	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 
 	VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
@@ -4506,34 +4531,17 @@  static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
 		success = false;
 		goto done;
 	}
-
 	/*
 	 * If the hardware doesn't automatically set the accessed bit, fallback
 	 * to lru_gen_look_around(), which only clears the accessed bit in a
 	 * handful of PTEs. Spreading the work out over a period of time usually
 	 * is less efficient, but it avoids bursty page faults.
 	 */
-	if (!should_walk_mmu()) {
-		success = iterate_mm_list_nowalk(lruvec, max_seq);
-		goto done;
-	}
-
-	walk = set_mm_walk(NULL, true);
-	if (!walk) {
+	if (!should_walk_mmu())
 		success = iterate_mm_list_nowalk(lruvec, max_seq);
-		goto done;
-	}
-
-	walk->lruvec = lruvec;
-	walk->max_seq = max_seq;
-	walk->can_swap = can_swap;
-	walk->force_scan = force_scan;
+	else
+		success = iterate_mm_list_walk(lruvec, max_seq, can_swap, force_scan);
 
-	do {
-		success = iterate_mm_list(lruvec, walk, &mm);
-		if (mm)
-			walk_mm(lruvec, mm, walk);
-	} while (mm);
 done:
 	if (success)
 		inc_max_seq(lruvec, can_swap, force_scan);
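
For reference, the tail of try_to_inc_max_seq() as it reads with this patch applied, assembled from the context and added lines above (nothing here beyond what the hunks show):

	/*
	 * If the hardware doesn't automatically set the accessed bit, fallback
	 * to lru_gen_look_around(), which only clears the accessed bit in a
	 * handful of PTEs. Spreading the work out over a period of time usually
	 * is less efficient, but it avoids bursty page faults.
	 */
	if (!should_walk_mmu())
		success = iterate_mm_list_nowalk(lruvec, max_seq);
	else
		success = iterate_mm_list_walk(lruvec, max_seq, can_swap, force_scan);
done:
	if (success)
		inc_max_seq(lruvec, can_swap, force_scan);

The choice between the walk and no-walk paths is now a single if/else, with all walk-specific state confined to the new helper.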