
[3/4] mm/vmscan: remove ignore_references argument of reclaim_pages()

Message ID 20240429224451.67081-4-sj@kernel.org (mailing list archive)
State New
Series mm/damon/paddr: simplify page level access re-check for pageout

Commit Message

SeongJae Park April 29, 2024, 10:44 p.m. UTC
All reclaim_pages() callers pass 'true' as the 'ignore_references'
argument.  In other words, the parameter carries no information.
Remove the argument to keep the interface simple.

Signed-off-by: SeongJae Park <sj@kernel.org>
---
 mm/damon/paddr.c | 2 +-
 mm/internal.h    | 2 +-
 mm/madvise.c     | 4 ++--
 mm/vmscan.c      | 6 +++---
 4 files changed, 7 insertions(+), 7 deletions(-)
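
A caller-side sketch of the change (the wrapper below is hypothetical,
for illustration only; the reclaim_pages() prototypes are the ones this
patch updates in mm/internal.h):

        /* Hypothetical caller; not part of this patch. */
        static unsigned long pageout_folios(struct list_head *folio_list)
        {
                /* Before: every caller passed ignore_references == true. */
                /* return reclaim_pages(folio_list, true); */

                /* After: the always-true flag is folded into reclaim_pages(). */
                return reclaim_pages(folio_list);
        }

Caller behavior is unchanged, since 'true' was the only value ever
passed.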

Patch

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 974edef1740d..18797c1b419b 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -283,7 +283,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
 	}
 	if (install_young_filter)
 		damos_destroy_filter(filter);
-	applied = reclaim_pages(&folio_list, true);
+	applied = reclaim_pages(&folio_list);
 	cond_resched();
 	return applied * PAGE_SIZE;
 }
diff --git a/mm/internal.h b/mm/internal.h
index c5552d35d995..b2c75b12014e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1052,7 +1052,7 @@ extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
         unsigned long, unsigned long);
 
 extern void set_pageblock_order(void);
-unsigned long reclaim_pages(struct list_head *folio_list, bool ignore_references);
+unsigned long reclaim_pages(struct list_head *folio_list);
 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 					    struct list_head *folio_list);
 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
diff --git a/mm/madvise.c b/mm/madvise.c
index c49fef453491..a77893462b92 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -423,7 +423,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 huge_unlock:
 		spin_unlock(ptl);
 		if (pageout)
-			reclaim_pages(&folio_list, true);
+			reclaim_pages(&folio_list);
 		return 0;
 	}
 
@@ -547,7 +547,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 		pte_unmap_unlock(start_pte, ptl);
 	}
 	if (pageout)
-		reclaim_pages(&folio_list, true);
+		reclaim_pages(&folio_list);
 	cond_resched();
 
 	return 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 49bd94423961..fc9dd9a24739 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2150,7 +2150,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
 	return nr_reclaimed;
 }
 
-unsigned long reclaim_pages(struct list_head *folio_list, bool ignore_references)
+unsigned long reclaim_pages(struct list_head *folio_list)
 {
 	int nid;
 	unsigned int nr_reclaimed = 0;
@@ -2173,11 +2173,11 @@ unsigned long reclaim_pages(struct list_head *folio_list, bool ignore_references
 		}
 
 		nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid),
-						   ignore_references);
+						   true);
 		nid = folio_nid(lru_to_folio(folio_list));
 	} while (!list_empty(folio_list));
 
-	nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid), ignore_references);
+	nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid), true);
 
 	memalloc_noreclaim_restore(noreclaim_flag);