@@ -4491,12 +4491,37 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
spin_unlock_irq(&lruvec->lru_lock);
}
+static bool iterate_mm_list_walk(struct lruvec *lruvec, unsigned long max_seq,
+ bool can_swap, bool force_scan)
+{
+ bool success;
+ struct mm_struct *mm = NULL;
+ struct lru_gen_mm_walk *walk;
+
+ walk = set_mm_walk(NULL, true);
+ if (!walk) {
+ success = iterate_mm_list_nowalk(lruvec, max_seq);
+ return success;
+ }
+
+ walk->lruvec = lruvec;
+ walk->max_seq = max_seq;
+ walk->can_swap = can_swap;
+ walk->force_scan = force_scan;
+
+ do {
+ success = iterate_mm_list(lruvec, walk, &mm);
+ if (mm)
+ walk_mm(lruvec, mm, walk);
+ } while (mm);
+
+ return success;
+}
+
static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
struct scan_control *sc, bool can_swap, bool force_scan)
{
bool success;
- struct lru_gen_mm_walk *walk;
- struct mm_struct *mm = NULL;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
@@ -4506,34 +4531,17 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
success = false;
goto done;
}
-
/*
* If the hardware doesn't automatically set the accessed bit, fallback
* to lru_gen_look_around(), which only clears the accessed bit in a
* handful of PTEs. Spreading the work out over a period of time usually
* is less efficient, but it avoids bursty page faults.
*/
- if (!should_walk_mmu()) {
- success = iterate_mm_list_nowalk(lruvec, max_seq);
- goto done;
- }
-
- walk = set_mm_walk(NULL, true);
- if (!walk) {
+ if (!should_walk_mmu())
success = iterate_mm_list_nowalk(lruvec, max_seq);
- goto done;
- }
-
- walk->lruvec = lruvec;
- walk->max_seq = max_seq;
- walk->can_swap = can_swap;
- walk->force_scan = force_scan;
+ else
+ success = iterate_mm_list_walk(lruvec, max_seq, can_swap, force_scan);
- do {
- success = iterate_mm_list(lruvec, walk, &mm);
- if (mm)
- walk_mm(lruvec, mm, walk);
- } while (mm);
done:
if (success)
inc_max_seq(lruvec, can_swap, force_scan);
In a later patch we will not build this on the ppc64 architecture. No functional change in this patch.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 mm/vmscan.c | 52 ++++++++++++++++++++++++++++++----------------------
 1 file changed, 30 insertions(+), 22 deletions(-)