@@ -283,17 +283,28 @@ static void show_vma_header_prefix(struct seq_file *m,
unsigned long start, end;
dev_t dev = 0;
const char *name = NULL;
+ long nrpages = 0, gen = 0, tier = 0;
if (file) {
struct inode *inode = file_inode(vma->vm_file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
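+		/* Page cache size plus this mapping's aggregate MGLRU gen/tier. */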
+ nrpages = inode->i_mapping->nrpages;
+ gen = atomic_long_read(&inode->i_mapping->gen);
+ tier = atomic_long_read(&inode->i_mapping->tier);
}
start = vma->vm_start;
end = vma->vm_end;
show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
+
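+	/* Emit nrpages:gen:tier as three 8-digit hex fields. */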
+ seq_put_hex_ll(m, NULL, nrpages, 8);
+ seq_put_hex_ll(m, ":", gen, 8);
+ seq_put_hex_ll(m, ":", tier, 8);
+
if (mm)
anon_name = anon_vma_name(vma);
@@ -434,6 +434,9 @@ struct address_space {
struct rb_root_cached i_mmap;
struct rw_semaphore i_mmap_rwsem;
unsigned long nrpages;
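+	/* Aggregate MGLRU gen/tier of cached folios; see mm/filemap.c. */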
+ atomic_long_t gen;
+ atomic_long_t tier;
pgoff_t writeback_index;
const struct address_space_operations *a_ops;
unsigned long flags;
@@ -307,6 +307,21 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return false;
}
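 
+/* Stubs for !CONFIG_LRU_GEN: refs, tiers and gens all read as zero. */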
+static inline int lru_tier_from_refs(int refs)
+{
+ return 0;
+}
+
+static inline int folio_lru_refs(struct folio *folio)
+{
+ return 0;
+}
+
+static inline int folio_lru_gen(struct folio *folio)
+{
+ return 0;
+}
#endif /* CONFIG_LRU_GEN */
static __always_inline
@@ -45,6 +45,7 @@
+#include <linux/mm_inline.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>
@@ -126,6 +127,10 @@ static void page_cache_delete(struct address_space *mapping,
{
XA_STATE(xas, &mapping->i_pages, folio->index);
long nr = 1;
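+	/* Sample the folio's MGLRU state before it leaves the page cache. */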
+ int refs = folio_lru_refs(folio);
+ int tier = lru_tier_from_refs(refs);
+ int gen = folio_lru_gen(folio);
mapping_set_update(&xas, mapping);
@@ -143,6 +148,9 @@ static void page_cache_delete(struct address_space *mapping,
folio->mapping = NULL;
/* Leave page->index set: truncation lookup relies upon it */
mapping->nrpages -= nr;
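+	/* Drop this folio's contribution to the aggregate gen/tier counters. */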
+ atomic_long_sub(gen, &mapping->gen);
+ atomic_long_sub(tier, &mapping->tier);
}
static void filemap_unaccount_folio(struct address_space *mapping,
@@ -844,6 +852,10 @@ noinline int __filemap_add_folio(struct address_space *mapping,
int huge = folio_test_hugetlb(folio);
bool charged = false;
long nr = 1;
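+	/* The folio is not on an LRU list yet; this samples its initial state. */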
+ int refs = folio_lru_refs(folio);
+ int tier = lru_tier_from_refs(refs);
+ int gen = folio_lru_gen(folio);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
@@ -898,6 +910,9 @@ noinline int __filemap_add_folio(struct address_space *mapping,
goto unlock;
mapping->nrpages += nr;
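+		/* Fold the new folio's initial state into the aggregate counters. */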
+ atomic_long_add(gen, &mapping->gen);
+ atomic_long_add(tier, &mapping->tier);
/* hugetlb pages do not participate in page cache accounting */
if (!huge) {
@@ -468,6 +468,20 @@ static void folio_inc_refs(struct folio *folio)
new_flags += BIT(LRU_REFS_PGOFF);
new_flags |= old_flags & ~LRU_REFS_MASK;
} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
+
+	/*
+	 * ->mapping holds an anon_vma pointer for anonymous folios, so only
+	 * account file-backed folios. Add the tier delta rather than 1 so
+	 * the sum stays consistent with what page_cache_delete() subtracts.
+	 */
+	if (folio->mapping && !folio_test_anon(folio)) {
+		int old_refs = (old_flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF;
+		int new_refs = (new_flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF;
+
+		atomic_long_add(lru_tier_from_refs(new_refs) -
+				lru_tier_from_refs(old_refs),
+				&folio->mapping->tier);
+	}
}
#else
static void folio_inc_refs(struct folio *folio)