diff --git a/include/linux/swap.h b/include/linux/swap.h
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -409,7 +409,7 @@ extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
-extern void deactivate_page(struct page *page);
+extern bool deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -725,9 +725,9 @@ void deactivate_file_folio(struct folio *folio)
*
* deactivate_page() moves @page to the inactive list if @page was on the active
* list and was not an unevictable page. This is done to accelerate the reclaim
- * of @page.
+ * of @page. Returns true if @page was successfully deactivated.
*/
-void deactivate_page(struct page *page)
+bool deactivate_page(struct page *page)
{
struct folio *folio = page_folio(page);
@@ -740,7 +740,9 @@ void deactivate_page(struct page *page)
fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
local_unlock(&cpu_fbatches.lock);
+ return true;
}
+ return false;
}

/**
Returns true if the page was successfully deactivated. The return value
will be used for statistics in the next patch.

Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 include/linux/swap.h | 2 +-
 mm/swap.c            | 6 ++++--
 2 files changed, 5 insertions(+), 3 deletions(-)
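
For context, the changelog says the new return value will feed statistics in
the next patch. The sketch below illustrates how a caller could tally
successful deactivations; it is a hypothetical example and not part of this
series, and the function name and counter are made up for illustration:

#include <linux/mm_types.h>
#include <linux/swap.h>

/*
 * Hypothetical caller (illustration only): count how many pages were
 * actually queued for deactivation, so the total could later be
 * exported as a statistic.
 */
static unsigned long deactivate_pages(struct page **pages, int nr)
{
	unsigned long nr_deactivated = 0;
	int i;

	for (i = 0; i < nr; i++) {
		/* deactivate_page() now reports whether the page was moved */
		if (deactivate_page(pages[i]))
			nr_deactivated++;
	}

	return nr_deactivated;
}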