@@ -388,7 +388,6 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages);
void lru_note_cost_folio(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
-void lru_cache_add(struct page *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);
@@ -82,12 +82,6 @@ bool redirty_page_for_writepage(struct writeback_control *wbc,
}
EXPORT_SYMBOL(redirty_page_for_writepage);
-void lru_cache_add(struct page *page)
-{
- folio_add_lru(page_folio(page));
-}
-EXPORT_SYMBOL(lru_cache_add);
-
void lru_cache_add_inactive_or_unevictable(struct page *page,
struct vm_area_struct *vma)
{
@@ -573,7 +573,7 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
* refcount. We do this because invalidate_inode_pages2() needs stronger
* invalidation guarantees, and cannot afford to leave pages behind because
* shrink_page_list() has a temp ref on them, or because they're transiently
- * sitting in the lru_cache_add() pagevecs.
+ * sitting in the folio_add_lru() pagevecs.
*/
static int invalidate_complete_folio2(struct address_space *mapping,
struct folio *folio)
@@ -492,7 +492,7 @@ void workingset_refault(struct folio *folio, void *shadow)
/* Folio was active prior to eviction */
if (workingset) {
folio_set_workingset(folio);
- /* XXX: Move to lru_cache_add() when it supports new vs putback */
+ /* XXX: Move to folio_add_lru() when it supports new vs putback */
lru_note_cost_folio(folio);
mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
}
There are no longer any callers of lru_cache_add(), so remove it. This saves 107 bytes of kernel text. Also clean up some comments such that they reference the new folio_add_lru() instead. Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com> --- include/linux/swap.h | 1 - mm/folio-compat.c | 6 ------ mm/truncate.c | 2 +- mm/workingset.c | 2 +- 4 files changed, 2 insertions(+), 9 deletions(-)