@@ -3122,8 +3122,45 @@ static int folio_update_gen(struct folio *folio, int gen)
*/
struct lru_gen_inc_batch {
int delta;
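+ /* pending bulk move: folios from head to tail, still on their old list */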
+ struct folio *head, *tail;
};
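+/*
+ * Flush a pending bulk move: splice the folios gathered between
+ * batch->head and batch->tail onto the tail of the bulk_gen list.
+ */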
+static inline void lru_gen_inc_bulk_done(struct lru_gen_folio *lrugen,
+ int bulk_gen, bool type, int zone,
+ struct lru_gen_inc_batch *batch)
+{
+ if (!batch->head)
+ return;
+
+ list_bulk_move_tail(&lrugen->folios[bulk_gen][type][zone],
+ &batch->head->lru,
+ &batch->tail->lru);
+
+ batch->head = NULL;
+}
+
+/*
+ * When aging, protected pages will go to the tail of the same higher
+ * gen, so they can be moved in batches. Besides reducing overhead, this
+ * also avoids changing their LRU order within a small scope.
+ */
+static inline void lru_gen_try_bulk_move(struct lru_gen_folio *lrugen, struct folio *folio,
+ int bulk_gen, int new_gen, bool type, int zone,
+ struct lru_gen_inc_batch *batch)
+{
+ /*
+ * If the folio is not moving to bulk_gen, it has raced with promotion,
+ * so it needs to go to the head of its new generation's list instead.
+ */
+ if (bulk_gen != new_gen) {
+ list_move(&folio->lru, &lrugen->folios[new_gen][type][zone]);
+ return;
+ }
+
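+ /*
+ * Folios are scanned from the tail of the LRU list towards its head,
+ * so the first folio added to the batch becomes the bulk tail and the
+ * most recently added one becomes the bulk head.
+ */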
+ if (!batch->head)
+ batch->tail = folio;
+
+ batch->head = folio;
+}
+
static void lru_gen_inc_batch_done(struct lruvec *lruvec, int gen, int type, int zone,
struct lru_gen_inc_batch *batch)
{
@@ -3132,6 +3169,8 @@ static void lru_gen_inc_batch_done(struct lruvec *lruvec, int gen, int type, int
struct lru_gen_folio *lrugen = &lruvec->lrugen;
enum lru_list lru = type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
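+ /* flush any pending bulk move before updating the counters */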
+ lru_gen_inc_bulk_done(lrugen, new_gen, type, zone, batch);
+
if (!delta)
return;
@@ -3709,6 +3748,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
struct lru_gen_inc_batch batch = { };
struct lru_gen_folio *lrugen = &lruvec->lrugen;
int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
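+ /* folios aged here normally move to old_gen + 1, the bulk target */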
+ int bulk_gen = (old_gen + 1) % MAX_NR_GENS;
if (type == LRU_GEN_ANON && !can_swap)
goto done;
@@ -3737,7 +3777,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
}
new_gen = folio_inc_gen(folio, old_gen, false, &batch);
- list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);
+ lru_gen_try_bulk_move(lrugen, folio, bulk_gen, new_gen, type, zone, &batch);
if (!--remaining) {
lru_gen_inc_batch_done(lruvec, old_gen, type, zone, &batch);
@@ -4275,6 +4315,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
int tier = lru_tier_from_refs(refs);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
int old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
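+ /* protected and ineligible folios normally move to old_gen + 1, the bulk target */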
+ int bulk_gen = (old_gen + 1) % MAX_NR_GENS;
VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
@@ -4308,7 +4349,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
gen = folio_inc_gen(folio, old_gen, false, batch);
- list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
+ lru_gen_try_bulk_move(lrugen, folio, bulk_gen, gen, type, zone, batch);
WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
lrugen->protected[hist][type][tier - 1] + delta);
@@ -4318,7 +4359,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
/* ineligible */
if (zone > sc->reclaim_idx || skip_cma(folio, sc)) {
gen = folio_inc_gen(folio, old_gen, false, batch);
- list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
+ lru_gen_try_bulk_move(lrugen, folio, bulk_gen, gen, type, zone, batch);
return true;
}