[v2,3/5] mm/mglru: Move code around to make a future patch easier

Message ID 20230706062044.816068-4-aneesh.kumar@linux.ibm.com (mailing list archive)
State New
Series Avoid building lrugen page table walk code

Commit Message

Aneesh Kumar K.V July 6, 2023, 6:20 a.m. UTC
Move set_mm_walk() and clear_mm_walk() up in mm/vmscan.c, above the
Bloom filter helpers, so that a later patch in this series can use
them earlier in the file. No functional change in this patch.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 mm/vmscan.c | 64 ++++++++++++++++++++++++++---------------------------
 1 file changed, 32 insertions(+), 32 deletions(-)
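
For context, the two helpers being moved are used as a pair around a reclaim
pass: set_mm_walk() picks up the preallocated per-node walk state for kswapd,
or optionally allocates one for other reclaimers, and clear_mm_walk() drops it
again, freeing the allocation for non-kswapd callers. A minimal sketch of that
calling pattern, with a hypothetical caller that is not part of this patch:

static void example_reclaim_pass(struct pglist_data *pgdat)
{
	struct lru_gen_mm_walk *walk;

	/* kswapd gets &pgdat->mm_walk; other reclaimers get a
	 * kzalloc()'d walk because force_alloc is true here */
	walk = set_mm_walk(pgdat, true);
	if (!walk)
		return;

	/* ... batch page and mm-walk accounting through *walk ... */

	/* resets current->reclaim_state->mm_walk and, unless we
	 * are kswapd, kfree()s the allocation */
	clear_mm_walk();
}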

Patch

diff --git a/mm/vmscan.c b/mm/vmscan.c
index c5fbc3babcd8..a846a62df0ba 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3556,6 +3556,38 @@ static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk
 	}
 }
 
+static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc)
+{
+	struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
+
+	if (pgdat && current_is_kswapd()) {
+		VM_WARN_ON_ONCE(walk);
+
+		walk = &pgdat->mm_walk;
+	} else if (!walk && force_alloc) {
+		VM_WARN_ON_ONCE(current_is_kswapd());
+
+		walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
+	}
+
+	current->reclaim_state->mm_walk = walk;
+
+	return walk;
+}
+
+static void clear_mm_walk(void)
+{
+	struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
+
+	VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages)));
+	VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats)));
+
+	current->reclaim_state->mm_walk = NULL;
+
+	if (!current_is_kswapd())
+		kfree(walk);
+}
+
 /******************************************************************************
  *                          Bloom filters
  ******************************************************************************/
@@ -4324,38 +4356,6 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_
 	} while (err == -EAGAIN);
 }
 
-static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc)
-{
-	struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
-
-	if (pgdat && current_is_kswapd()) {
-		VM_WARN_ON_ONCE(walk);
-
-		walk = &pgdat->mm_walk;
-	} else if (!walk && force_alloc) {
-		VM_WARN_ON_ONCE(current_is_kswapd());
-
-		walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
-	}
-
-	current->reclaim_state->mm_walk = walk;
-
-	return walk;
-}
-
-static void clear_mm_walk(void)
-{
-	struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
-
-	VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages)));
-	VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats)));
-
-	current->reclaim_state->mm_walk = NULL;
-
-	if (!current_is_kswapd())
-		kfree(walk);
-}
-
 static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
 {
 	int zone;
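
The diff is pure movement: the definitions are hoisted above the Bloom filter
section so that code added there by a later patch in the series can call the
helpers directly. The alternative would have been forward declarations,
roughly like the following (illustrative only, not part of the patch):

/* Hypothetical alternative that the move avoids: declare the helpers
 * early so later code could call them before their definitions. */
static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat,
					   bool force_alloc);
static void clear_mm_walk(void);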