@@ -379,7 +379,8 @@ static void __lru_cache_activate_folio(struct folio *folio)
}
#ifdef CONFIG_LRU_GEN
-static void folio_inc_refs(struct folio *folio)
+
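+/* bump the access count tracked in the LRU_REFS bits of folio->flags */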
+static void lru_gen_inc_refs(struct folio *folio)
{
unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
@@ -406,10 +407,30 @@ static void folio_inc_refs(struct folio *folio)
new_flags |= old_flags & ~LRU_REFS_MASK;
} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
}
-#else
-static void folio_inc_refs(struct folio *folio)
+
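+/*
+ * Clear the access bits tracked in folio->flags and return true if the folio
+ * is already in the oldest generation, i.e. the caller does not need to move
+ * it under the LRU lock.
+ */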
+static bool lru_gen_clear_refs(struct folio *folio)
{
+ int gen = folio_lru_gen(folio);
+ int type = folio_is_file_lru(folio);
+ struct lru_gen_folio *lrugen = &folio_lruvec(folio)->lrugen;
+
+ set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0);
+
+	/* whether this can be done without shuffling under the LRU lock */
+ return gen == lru_gen_from_seq(READ_ONCE(lrugen->min_seq[type]));
}
+
+#else /* !CONFIG_LRU_GEN */
+
+static void lru_gen_inc_refs(struct folio *folio)
+{
+}
+
+static bool lru_gen_clear_refs(struct folio *folio)
+{
+ return false;
+}
+
#endif /* CONFIG_LRU_GEN */
/**
@@ -428,7 +449,7 @@ static void folio_inc_refs(struct folio *folio)
void folio_mark_accessed(struct folio *folio)
{
if (lru_gen_enabled()) {
- folio_inc_refs(folio);
+ lru_gen_inc_refs(folio);
return;
}
@@ -524,7 +545,7 @@ void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
*/
static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
{
- bool active = folio_test_active(folio);
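+	/* MGLRU has no active/inactive split, so treat the folio as active */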
+ bool active = folio_test_active(folio) || lru_gen_enabled();
long nr_pages = folio_nr_pages(folio);
if (folio_test_unevictable(folio))
@@ -589,7 +610,10 @@ static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
lruvec_del_folio(lruvec, folio);
folio_clear_active(folio);
- folio_clear_referenced(folio);
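+	/* MGLRU tracks accesses in the LRU_REFS bits instead of PG_referenced */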
+ if (lru_gen_enabled())
+ lru_gen_clear_refs(folio);
+ else
+ folio_clear_referenced(folio);
/*
* Lazyfree folios are clean anonymous folios. They have
* the swapbacked flag cleared, to distinguish them from normal
@@ -657,6 +681,9 @@ void deactivate_file_folio(struct folio *folio)
if (folio_test_unevictable(folio))
return;
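+	/* with MGLRU, skip the batch if the folio is already in the oldest generation */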
+ if (lru_gen_enabled() && lru_gen_clear_refs(folio))
+ return;
+
folio_batch_add_and_move(folio, lru_deactivate_file, true);
}
@@ -670,7 +697,10 @@ void deactivate_file_folio(struct folio *folio)
*/
void folio_deactivate(struct folio *folio)
{
- if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
+ if (folio_test_unevictable(folio))
+ return;
+
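+	/*
+	 * With MGLRU, skip folios already in the oldest generation; otherwise
+	 * skip folios that are not active.
+	 */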
+ if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio))
return;
folio_batch_add_and_move(folio, lru_deactivate, true);