diff --git a/include/linux/page_aging.h b/include/linux/page_aging.h
new file mode 100644
--- /dev/null
+++ b/include/linux/page_aging.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _LINUX_PAGE_AGING_H
+#define _LINUX_PAGE_AGING_H
+
+#ifndef arch_supports_page_access_count
+static inline bool arch_supports_page_access_count(void)
+{
+ return false;
+}
+#endif
+
+#ifdef CONFIG_LRU_GEN
+#ifndef arch_get_lru_gen_seq
+static inline unsigned long arch_get_lru_gen_seq(struct lruvec *lruvec, struct folio *folio)
+{
+ int type = folio_is_file_lru(folio);
+
+ return lruvec->lrugen.min_seq[type];
+}
+#endif
+#endif /* CONFIG_LRU_GEN */
+
+#endif
+
+
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -74,6 +74,7 @@
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>
+#include <linux/page_aging.h>
#include <asm/tlbflush.h>
@@ -825,7 +826,8 @@ static bool folio_referenced_one(struct folio *folio,
if (pvmw.pte) {
if (lru_gen_enabled() && pte_young(*pvmw.pte) &&
!(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))) {
- lru_gen_look_around(&pvmw);
+ if (!arch_supports_page_access_count())
+ lru_gen_look_around(&pvmw);
referenced++;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -62,6 +62,7 @@
#include <linux/swapops.h>
#include <linux/balloon_compaction.h>
#include <linux/sched/sysctl.h>
+#include <linux/page_aging.h>
#include "internal.h"
#include "swap.h"
@@ -4934,7 +4935,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
{
bool success;
- int gen = folio_lru_gen(folio);
+ int gen;
int type = folio_is_file_lru(folio);
int zone = folio_zonenum(folio);
int delta = folio_nr_pages(folio);
@@ -4942,7 +4943,6 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
int tier = lru_tier_from_refs(refs);
struct lru_gen_struct *lrugen = &lruvec->lrugen;
- VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
/* unevictable */
if (!folio_evictable(folio)) {
@@ -4963,8 +4963,14 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
return true;
}
- /* promoted */
+ if (!arch_supports_page_access_count()) {
+ gen = folio_lru_gen(folio);
+ VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
+ } else
+ gen = lru_gen_from_seq(arch_get_lru_gen_seq(lruvec, folio));
+
if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
+ /* promote the folio */
list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
return true;
}
@@ -5464,12 +5470,22 @@ bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaimi
*/
if (folio_test_active(folio))
seq = lrugen->max_seq;
- else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
- (folio_test_reclaim(folio) &&
- (folio_test_dirty(folio) || folio_test_writeback(folio))))
- seq = lrugen->min_seq[type] + 1;
- else
- seq = lrugen->min_seq[type];
+ else {
+ /*
+	 * For a non-active folio, use the arch-based
+	 * aging details to derive the MGLRU generation.
+ */
+ seq = arch_get_lru_gen_seq(lruvec, folio);
+
+ if (seq == lrugen->min_seq[type]) {
+ if ((type == LRU_GEN_ANON &&
+ !folio_test_swapcache(folio)) ||
+ (folio_test_reclaim(folio) &&
+ (folio_test_dirty(folio) ||
+ folio_test_writeback(folio))))
+ seq = lrugen->min_seq[type] + 1;
+ }
+ }
gen = lru_gen_from_seq(seq);
flags = (gen + 1UL) << LRU_GEN_PGOFF;
Some architectures provide their own mechanism for determining the page
access count. In that case we may not need page flags to track the
generation; it can be derived directly from the arch-supported access
count values. Hence avoid using page flags to store the generation when
such support is available.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 include/linux/page_aging.h | 26 ++++++++++++++++++++++++++
 mm/rmap.c                  |  4 +++-
 mm/vmscan.c                | 34 +++++++++++++++++++++++++---------
 3 files changed, 54 insertions(+), 10 deletions(-)
 create mode 100644 include/linux/page_aging.h
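
As a rough sketch of how an architecture could plug into these hooks: the
snippet below pairs each override with a same-named #define so that the
#ifndef fallbacks in <linux/page_aging.h> are skipped, and derives the
target generation from a hypothetical per-folio hardware access counter.
The header path, arch_folio_access_count(), HOT_ACCESS_THRESHOLD and
CONFIG_ARCH_HAS_PAGE_ACCESS_COUNT are illustrative assumptions, not part
of this series, and the arch definitions are assumed to be visible before
<linux/page_aging.h> is included.

/* e.g. arch/<arch>/include/asm/page_aging.h -- illustrative only */

#define arch_supports_page_access_count arch_supports_page_access_count
static inline bool arch_supports_page_access_count(void)
{
	/* Assumed Kconfig/feature gate for the hardware access counter. */
	return IS_ENABLED(CONFIG_ARCH_HAS_PAGE_ACCESS_COUNT);
}

#define arch_get_lru_gen_seq arch_get_lru_gen_seq
static inline unsigned long arch_get_lru_gen_seq(struct lruvec *lruvec,
						 struct folio *folio)
{
	int type = folio_is_file_lru(folio);
	/* Hypothetical helper returning the per-folio access count. */
	unsigned long count = arch_folio_access_count(folio);

	/*
	 * Frequently accessed folios are reported as belonging to the
	 * youngest generation; everything else stays in the oldest one,
	 * matching what the generic fallback returns.
	 */
	if (count >= HOT_ACCESS_THRESHOLD)
		return READ_ONCE(lruvec->lrugen.max_seq);

	return lruvec->lrugen.min_seq[type];
}

With an override along these lines, lru_gen_add_folio() would place a
folio that the hardware counter reports as hot straight into the
youngest generation instead of defaulting to min_seq, and sort_folio()
could re-promote it without consulting the generation bits in page flags.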