@@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool folio_isolate_movable(struct folio *folio, isolate_mode_t mode);
bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
int migrate_huge_page_move_mapping(struct address_space *mapping,
@@ -92,6 +93,9 @@ static inline struct folio *alloc_migration_target(struct folio *src,
{ return NULL; }
static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{ return false; }
+static inline bool folio_isolate_movable(struct folio *folio,
+ isolate_mode_t mode)
+ { return false; }
static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{ return false; }
@@ -58,21 +58,20 @@
#include "internal.h"
-bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+bool folio_isolate_movable(struct folio *folio, isolate_mode_t mode)
{
- struct folio *folio = folio_get_nontail_page(page);
const struct movable_operations *mops;
/*
- * Avoid burning cycles with pages that are yet under __free_pages(),
+ * Avoid burning cycles with folios that are yet under __free_pages(),
* or just got freed under us.
*
- * In case we 'win' a race for a movable page being freed under us and
+ * In case we 'win' a race for a movable folio being freed under us and
* raise its refcount preventing __free_pages() from doing its job
- * the put_page() at the end of this block will take care of
- * release this page, thus avoiding a nasty leakage.
+ * the folio_put() at the end of this block will take care of
+ * releasing this folio, thus avoiding a nasty leakage.
*/
- if (!folio)
+ if (!folio_try_get(folio))
goto out;
if (unlikely(folio_test_slab(folio)))
@@ -80,9 +79,9 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
smp_rmb();
/*
- * Check movable flag before taking the page lock because
- * we use non-atomic bitops on newly allocated page flags so
- * unconditionally grabbing the lock ruins page's owner side.
+ * Check movable flag before taking the folio lock because
+ * we use non-atomic bitops on newly allocated folio flags so
+ * unconditionally grabbing the lock ruins folio's owner side.
*/
if (unlikely(!__folio_test_movable(folio)))
goto out_putfolio;
@@ -92,15 +91,15 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
goto out_putfolio;
/*
- * As movable pages are not isolated from LRU lists, concurrent
- * compaction threads can race against page migration functions
- * as well as race against the releasing a page.
+ * As movable folios are not isolated from LRU lists, concurrent
+ * compaction threads can race against folio migration functions
+ * as well as race against releasing a folio.
*
- * In order to avoid having an already isolated movable page
+ * In order to avoid having an already isolated movable folio
* being (wrongly) re-isolated while it is under migration,
- * or to avoid attempting to isolate pages being released,
- * lets be sure we have the page lock
- * before proceeding with the movable page isolation steps.
+ * or to avoid attempting to isolate folios being released,
+ * let's be sure we have the folio lock
+ * before proceeding with the movable folio isolation steps.
*/
if (unlikely(!folio_trylock(folio)))
goto out_putfolio;
@@ -129,6 +128,14 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode)
return false;
}
+bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+{
+ if (PageTail(page))
+ return false;
+
+ return folio_isolate_movable((struct folio *)page, mode);
+}
+
static void putback_movable_folio(struct folio *folio)
{
const struct movable_operations *mops = folio_movable_ops(folio);
Like isolate_lru_page(), make isolate_movable_page() a wrapper around
folio_isolate_movable(). Since isolate_movable_page() always fails on a
tail page, return immediately for a tail page in the wrapper. The
wrapper will be removed once all callers are converted to
folio_isolate_movable().

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/migrate.h |  4 ++++
 mm/migrate.c            | 41 ++++++++++++++++++++++++-----------------
 2 files changed, 28 insertions(+), 17 deletions(-)
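
For reviewers: a minimal sketch (not part of this patch) of how a call
site might look before and after the conversion, assuming the caller
starts from a struct page it already knows is not a tail page. The
isolate_fail label and the ISOLATE_UNEVICTABLE mode are placeholders for
whatever the real call site uses:

	/* Before: pass the raw struct page. */
	if (!isolate_movable_page(page, ISOLATE_UNEVICTABLE))
		goto isolate_fail;

	/*
	 * After: resolve the folio first (page_folio() on a non-tail page),
	 * then call the new helper directly, skipping the PageTail() check
	 * the wrapper would otherwise repeat.
	 */
	folio = page_folio(page);
	if (!folio_isolate_movable(folio, ISOLATE_UNEVICTABLE))
		goto isolate_fail;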