@@ -4619,6 +4619,39 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
* rmap/PT walk feedback
******************************************************************************/
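+/*
+ * Promote a folio found by look around when no mm walk is available:
+ * set the referenced flag if the folio has no generation yet, otherwise
+ * activate it.
+ */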
+static void __look_around_gen_update(struct folio *folio, int new_gen)
+{
+	int old_gen;
+
+	old_gen = folio_lru_gen(folio);
+	if (old_gen < 0)
+		folio_set_referenced(folio);
+	else if (old_gen != new_gen)
+		folio_activate(folio);
+}
+
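+/*
+ * Whether look around may promote anon folios; without a walk, assume
+ * it can, matching the old !walk || walk->can_swap check.
+ */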
+static inline bool current_reclaim_state_can_swap(void)
+{
+	struct lru_gen_mm_walk *walk;
+
+	/* reclaim_state may be set while mm_walk is still NULL */
+	walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
+	return !walk || walk->can_swap;
+}
+
+static void look_around_gen_update(struct folio *folio, int new_gen)
+{
+	int old_gen;
+	struct lru_gen_mm_walk *walk;
+
+	walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
+	if (walk) {
+		/* a walk context exists: batch the nr_pages updates */
+		old_gen = folio_update_gen(folio, new_gen);
+		if (old_gen >= 0 && old_gen != new_gen)
+			update_batch_size(walk, folio, old_gen, new_gen);
+		return;
+	}
+
+	__look_around_gen_update(folio, new_gen);
+}
+
/*
* This function exploits spatial locality when shrink_folio_list() walks the
* rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If
@@ -4631,7 +4664,6 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
int i;
unsigned long start;
unsigned long end;
-	struct lru_gen_mm_walk *walk;
int young = 0;
pte_t *pte = pvmw->pte;
unsigned long addr = pvmw->address;
@@ -4640,7 +4672,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
struct pglist_data *pgdat = folio_pgdat(folio);
struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
DEFINE_MAX_SEQ(lruvec);
-	int old_gen, new_gen = lru_gen_from_seq(max_seq);
+	int new_gen = lru_gen_from_seq(max_seq);
lockdep_assert_held(pvmw->ptl);
VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio);
@@ -4648,9 +4680,6 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
if (spin_is_contended(pvmw->ptl))
return;
-	/* avoid taking the LRU lock under the PTL when possible */
-	walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
-
start = max(addr & PMD_MASK, pvmw->vma->vm_start);
end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
@@ -4683,7 +4712,9 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
if (!pte_young(pte[i]))
continue;
-		folio = get_pfn_folio(pfn, memcg, pgdat, !walk || walk->can_swap);
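+		/* anon folios are only eligible here when reclaim can swap */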
+		folio = get_pfn_folio(pfn, memcg, pgdat,
+				      current_reclaim_state_can_swap());
+
if (!folio)
continue;
@@ -4697,19 +4728,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
!folio_test_swapcache(folio)))
folio_mark_dirty(folio);
-		if (walk) {
-			old_gen = folio_update_gen(folio, new_gen);
-			if (old_gen >= 0 && old_gen != new_gen)
-				update_batch_size(walk, folio, old_gen, new_gen);
-
-			continue;
-		}
-
-		old_gen = folio_lru_gen(folio);
-		if (old_gen < 0)
-			folio_set_referenced(folio);
-		else if (old_gen != new_gen)
-			folio_activate(folio);
+		look_around_gen_update(folio, new_gen);
}
arch_leave_lazy_mmu_mode();
To store the generation details in folio_flags we need the lru_gen_mm_walk
structure, in which the nr_pages updates are batched. A follow-up patch
wants to avoid compiling all the lru_gen mm-walk related code on
architectures that don't support it. Split out the look-around generation
update that promotes a folio by marking it active into a separate helper,
which will be used in that case.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 mm/vmscan.c | 57 +++++++++++++++++++++++++++++++++++------------------
 1 file changed, 38 insertions(+), 19 deletions(-)
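A minimal sketch (not part of this patch) of how the follow-up might use the
split: the mm-walk path is compiled out behind a Kconfig symbol and only the
fallback helper remains. CONFIG_LRU_TASK_PAGE_AGING is a hypothetical name
here; everything else is the code introduced above.

#ifdef CONFIG_LRU_TASK_PAGE_AGING
/* the mm-walk path: batch nr_pages updates against the current walk */
static void look_around_gen_update(struct folio *folio, int new_gen)
{
	struct lru_gen_mm_walk *walk;
	int old_gen;

	walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
	if (walk) {
		old_gen = folio_update_gen(folio, new_gen);
		if (old_gen >= 0 && old_gen != new_gen)
			update_batch_size(walk, folio, old_gen, new_gen);
		return;
	}

	__look_around_gen_update(folio, new_gen);
}
#else
/* no mm walk on these architectures; always take the LRU-flag path */
static void look_around_gen_update(struct folio *folio, int new_gen)
{
	__look_around_gen_update(folio, new_gen);
}
#endif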