@@ -437,7 +437,7 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
* events from being lost.
*/
spin_lock_irqsave(&ctx->completion_lock, flags);
- migrate_page_copy(new, old);
+ migrate_page_copy(new, old, MIGRATE_SINGLETHREAD);
BUG_ON(ctx->ring_pages[idx] != old);
ctx->ring_pages[idx] = new;
spin_unlock_irqrestore(&ctx->completion_lock, flags);
@@ -2826,7 +2826,7 @@ int f2fs_migrate_page(struct address_space *mapping,
}
if ((mode & MIGRATE_MODE_MASK) != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
+ migrate_page_copy(newpage, page, MIGRATE_SINGLETHREAD);
else
migrate_page_states(newpage, page);
@@ -886,7 +886,7 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
}
if ((mode & MIGRATE_MODE_MASK) != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
+ migrate_page_copy(newpage, page, MIGRATE_SINGLETHREAD);
else
migrate_page_states(newpage, page);
@@ -585,7 +585,7 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
}
if ((mode & MIGRATE_MODE_MASK) != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
+ migrate_page_copy(newpage, page, MIGRATE_SINGLETHREAD);
else
migrate_page_states(newpage, page);
return MIGRATEPAGE_SUCCESS;
@@ -1491,7 +1491,7 @@ static int ubifs_migrate_page(struct address_space *mapping,
}
if ((mode & MIGRATE_MODE_MASK) != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
+ migrate_page_copy(newpage, page, MIGRATE_SINGLETHREAD);
else
migrate_page_states(newpage, page);
return MIGRATEPAGE_SUCCESS;
@@ -73,7 +73,8 @@ extern void putback_movable_page(struct page *page);
extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
-extern void migrate_page_copy(struct page *newpage, struct page *page);
+extern void migrate_page_copy(struct page *newpage, struct page *page,
+ enum migrate_mode mode);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
@@ -97,7 +98,8 @@ static inline void migrate_page_states(struct page *newpage, struct page *page)
}
static inline void migrate_page_copy(struct page *newpage,
- struct page *page) {}
+ struct page *page,
+ enum migrate_mode mode) {}
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page)
@@ -11,6 +11,8 @@
* with the CPU. Instead, page copy happens outside the migratepage()
* callback and is likely using a DMA engine. See migrate_vma() and HMM
* (mm/hmm.c) for users of this mode.
+ * MIGRATE_SINGLETHREAD uses a single thread to move pages; it is the
+ * default behavior.
*/
enum migrate_mode {
MIGRATE_ASYNC,
@@ -19,6 +21,7 @@ enum migrate_mode {
MIGRATE_SYNC_NO_COPY,
MIGRATE_MODE_MASK = 3,
+ MIGRATE_SINGLETHREAD = 0,
};
#endif /* MIGRATE_MODE_H_INCLUDED */
@@ -567,7 +567,8 @@ static void __copy_gigantic_page(struct page *dst, struct page *src,
}
}
-static void copy_huge_page(struct page *dst, struct page *src)
+static void copy_huge_page(struct page *dst, struct page *src,
+ enum migrate_mode mode)
{
int i;
int nr_pages;
@@ -657,10 +658,11 @@ void migrate_page_states(struct page *newpage, struct page *page)
}
EXPORT_SYMBOL(migrate_page_states);
-void migrate_page_copy(struct page *newpage, struct page *page)
+void migrate_page_copy(struct page *newpage, struct page *page,
+ enum migrate_mode mode)
{
if (PageHuge(page) || PageTransHuge(page))
- copy_huge_page(newpage, page);
+ copy_huge_page(newpage, page, mode);
else
copy_highpage(newpage, page);
@@ -692,7 +694,7 @@ int migrate_page(struct address_space *mapping,
return rc;
if ((mode & MIGRATE_MODE_MASK) != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
+ migrate_page_copy(newpage, page, mode);
else
migrate_page_states(newpage, page);
return MIGRATEPAGE_SUCCESS;
@@ -805,7 +807,7 @@ static int __buffer_migrate_page(struct address_space *mapping,
SetPagePrivate(newpage);
if ((mode & MIGRATE_MODE_MASK) != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
+ migrate_page_copy(newpage, page, MIGRATE_SINGLETHREAD);
else
migrate_page_states(newpage, page);
@@ -2024,7 +2026,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
new_page->index = page->index;
/* flush the cache before copying using the kernel virtual address */
flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
- migrate_page_copy(new_page, page);
+ migrate_page_copy(new_page, page, MIGRATE_SINGLETHREAD);
WARN_ON(PageLRU(new_page));
/* Recheck the target PMD */