@@ -2566,7 +2566,7 @@ static bool migrate_vma_check_page(struct page *page)
* FIXME proper solution is to rework migration_entry_wait() so
* it does not need to take a reference on page.
*/
- return is_device_private_page(page);
+ return is_device_page(page);
}
/* For file back page */
@@ -2855,7 +2855,7 @@ EXPORT_SYMBOL(migrate_vma_setup);
* handle_pte_fault()
* do_anonymous_page()
* to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
- * private page.
+ * private or public page.
*/
static void migrate_vma_insert_page(struct migrate_vma *migrate,
unsigned long addr,
@@ -2926,10 +2926,15 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
entry = swp_entry_to_pte(swp_entry);
+ } else if (is_device_page(page)) {
+ entry = pte_mkold(mk_pte(page,
+ READ_ONCE(vma->vm_page_prot)));
+ if (vma->vm_flags & VM_WRITE)
+ entry = pte_mkwrite(pte_mkdirty(entry));
} else {
/*
- * For now we only support migrating to un-addressable
- * device memory.
+ * We only support migrating to device private and
+ * device public memory; reject any other ZONE_DEVICE type.
*/
pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
goto abort;
@@ -3035,10 +3040,10 @@ void migrate_vma_pages(struct migrate_vma *migrate)
mapping = page_mapping(page);
if (is_zone_device_page(newpage)) {
- if (is_device_private_page(newpage)) {
+ if (is_device_page(newpage)) {
/*
- * For now only support private anonymous when
- * migrating to un-addressable device memory.
+ * For now only support private and public anonymous
+ * memory when migrating to device memory.
*/
if (mapping) {
migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
Add a device public page type case to the migrate_vma_insert_page(),
migrate_vma_pages() and migrate_vma_check_page() helpers.

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
---
 mm/migrate.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)
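Note: is_device_page() is not defined in these hunks; it is presumably
introduced earlier in this series. As a rough, hypothetical sketch only
(the helper name, the MEMORY_DEVICE_PUBLIC pgmap type and the lack of
config guards are assumptions, not taken from this patch), such a helper
would likely mirror is_device_private_page() and accept both private and
public ZONE_DEVICE pages:

/*
 * Hypothetical sketch, not part of this patch: treat both device
 * private and device public ZONE_DEVICE pages as migratable device
 * pages.  MEMORY_DEVICE_PUBLIC is assumed from the surrounding series.
 */
static inline bool is_device_page(const struct page *page)
{
	return is_zone_device_page(page) &&
	       (page->pgmap->type == MEMORY_DEVICE_PRIVATE ||
		page->pgmap->type == MEMORY_DEVICE_PUBLIC);
}

With a helper along those lines, migrate_vma_check_page() and
migrate_vma_pages() stop special-casing un-addressable memory, and
migrate_vma_insert_page() can install a normal, addressable PTE (the new
mk_pte()/pte_mkold() branch) for public pages instead of a device
private swap entry.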