@@ -337,6 +337,49 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
}
#endif
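+/*
+ * Error injection for exercising migrate_pages() failure paths: each
+ * EI_MP_* bit below forces one specific failure in the code that
+ * follows.  Settable at runtime via the ei_migrate_pages parameter.
+ */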
+#define EI_MP_ENOSYS		0x0001	/* treat THP migration as unsupported */
+#define EI_MP_THP_ENOMEM	0x0002	/* fail allocation of a THP target page */
+#define EI_MP_NP_ENOMEM		0x0004	/* fail allocation of a normal target page */
+#define EI_MP_EAGAIN		0x0008	/* fail the opportunistic page trylock */
+#define EI_MP_EOTHER		0x0010	/* reserved for other errors */
+#define EI_MP_NOSPLIT		0x0020	/* never split a THP on migration failure */
+#define EI_MP_SPLIT_FAIL	0x0040	/* fail THP split with -EBUSY */
+#define EI_MP_EAGAIN_PERM	0x0080	/* return -EAGAIN on every retry */
+#define EI_MP_EBUSY		0x0100	/* fail with -EBUSY after locking the page */
+
+static unsigned int ei_migrate_pages;
+
+module_param(ei_migrate_pages, uint, 0644);
+
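+/* EI_MP_ENOSYS: report THP migration as unsupported */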
+static bool ei_thp_migration_supported(void)
+{
+	if (ei_migrate_pages & EI_MP_ENOSYS)
+		return false;
+	return thp_migration_supported();
+}
+
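+/* EI_MP_EAGAIN: fail the opportunistic trylock, as if the page were locked */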
+static bool ei_trylock_page(struct page *page)
+{
+	if (ei_migrate_pages & EI_MP_EAGAIN)
+		return false;
+	return trylock_page(page);
+}
+
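+/* EI_MP_SPLIT_FAIL: refuse to split the THP */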
+static int ei_split_huge_page_to_list(struct page *page, struct list_head *list)
+{
+ if (ei_migrate_pages & EI_MP_SPLIT_FAIL)
+ return -EBUSY;
+ return split_huge_page_to_list(page, list);
+}
+
static int expected_page_refs(struct address_space *mapping, struct page *page)
{
int expected_count = 1;
@@ -368,6 +411,10 @@ int folio_migrate_mapping(struct address_space *mapping,
if (folio_ref_count(folio) != expected_count)
return -EAGAIN;
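+		/* EI_MP_EAGAIN_PERM: fail with -EAGAIN on every retry */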
+ if (ei_migrate_pages & EI_MP_EAGAIN_PERM)
+ return -EAGAIN;
+
/* No turning back from here */
newfolio->index = folio->index;
newfolio->mapping = folio->mapping;
@@ -929,7 +976,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
struct anon_vma *anon_vma = NULL;
bool is_lru = !__PageMovable(page);
- if (!trylock_page(page)) {
+ if (!ei_trylock_page(page)) {
if (!force || mode == MIGRATE_ASYNC)
goto out;
@@ -952,6 +999,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
lock_page(page);
}
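+	/* EI_MP_EBUSY: fail hard once the page lock has been taken */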
+ if (ei_migrate_pages & EI_MP_EBUSY) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
if (PageWriteback(page)) {
/*
* Only in the case of a full synchronous migration is it
@@ -1086,7 +1139,7 @@ static int unmap_and_move(new_page_t get_new_page,
int rc = MIGRATEPAGE_SUCCESS;
struct page *newpage = NULL;
- if (!thp_migration_supported() && PageTransHuge(page))
+ if (!ei_thp_migration_supported() && PageTransHuge(page))
return -ENOSYS;
if (page_count(page) == 1) {
@@ -1102,6 +1155,12 @@ static int unmap_and_move(new_page_t get_new_page,
goto out;
}
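+	/* EI_MP_{THP,NP}_ENOMEM: pretend the target page allocation failed */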
+ if ((ei_migrate_pages & EI_MP_THP_ENOMEM) && PageTransHuge(page))
+ return -ENOMEM;
+ if ((ei_migrate_pages & EI_MP_NP_ENOMEM) && !PageTransHuge(page))
+ return -ENOMEM;
+
newpage = get_new_page(page, private);
if (!newpage)
return -ENOMEM;
@@ -1305,7 +1364,7 @@ static inline int try_split_thp(struct page *page, struct list_head *split_pages
int rc;
lock_page(page);
- rc = split_huge_page_to_list(page, split_pages);
+ rc = ei_split_huge_page_to_list(page, split_pages);
unlock_page(page);
return rc;
@@ -1358,6 +1417,10 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
bool nosplit = (reason == MR_NUMA_MISPLACED);
bool no_subpage_counting = false;
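+	/* EI_MP_NOSPLIT: never fall back to splitting the THP */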
+ if (ei_migrate_pages & EI_MP_NOSPLIT)
+ nosplit = true;
+
trace_mm_migrate_pages_start(mode, reason);
thp_subpage_migration: