Migration is completely generalized: driver pages are now handled by the
common migration path together with LRU pages, so __unmap_and_move() no
longer needs a driver-page shortcut and balloon_page_migrate() no longer
needs to take the 'newpage' lock itself.

Signed-off-by: Gioh Kim <gioh.kim@lge.com>
---
 mm/balloon_compaction.c |  8 --------
 mm/migrate.c            | 15 ---------------
 2 files changed, 23 deletions(-)

diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -206,13 +206,6 @@ int balloon_page_migrate(struct address_space *mapping,
 	if (!isolated_balloon_page(page))
 		return rc;
 
-	/*
-	 * Block others from accessing the 'newpage' when we get around to
-	 * establishing additional references. We should be the only one
-	 * holding a reference to the 'newpage' at this point.
-	 */
-	BUG_ON(!trylock_page(newpage));
-
 	if (WARN_ON(!__is_movable_balloon_page(page))) {
 		dump_page(page, "not movable balloon page");
 		unlock_page(newpage);
@@ -222,7 +215,6 @@ int balloon_page_migrate(struct address_space *mapping,
 	if (balloon && balloon->migratepage)
 		rc = balloon->migratepage(balloon, newpage, page, mode);
 
-	unlock_page(newpage);
 	return rc;
 }
 #endif /* CONFIG_BALLOON_COMPACTION */
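For reference, balloon_page_migrate() reads roughly as follows once both
hunks above are applied. This is a sketch, not part of the patch: the
local declarations are elided, the parameter list past the first line is
inferred from the call sites, and it assumes the generic migration path
now hands over 'newpage' already locked.

int balloon_page_migrate(struct address_space *mapping,
			 struct page *newpage, struct page *page,
			 enum migrate_mode mode)
{
	/*
	 * ... local declarations elided: rc (initialized to -EAGAIN)
	 * and the balloon device pointer looked up from 'page' ...
	 */

	if (!isolated_balloon_page(page))
		return rc;

	/*
	 * No trylock_page(newpage) here any more; 'newpage' is assumed
	 * to arrive already locked by the caller.
	 */
	if (WARN_ON(!__is_movable_balloon_page(page))) {
		dump_page(page, "not movable balloon page");
		unlock_page(newpage);
		return rc;
	}

	if (balloon && balloon->migratepage)
		rc = balloon->migratepage(balloon, newpage, page, mode);

	return rc;
}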
diff --git a/mm/migrate.c b/mm/migrate.c
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -844,21 +844,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		}
 	}
 
-	if (unlikely(driver_page_migratable(page))) {
-		/*
-		 * A driver page does not need any special attention from
-		 * physical to virtual reverse mapping procedures.
-		 * Skip any attempt to unmap PTEs or to remap swap cache,
-		 * in order to avoid burning cycles at rmap level, and perform
-		 * the page migration right away (proteced by page lock).
-		 */
-		rc = page->mapping->a_ops->migratepage(page->mapping,
-							newpage,
-							page,
-							mode);
-		goto out_unlock;
-	}
-
 	/*
 	 * Corner case handling:
 	 * 1. When a new swap-cache page is read into, it is added to the LRU
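With the shortcut above removed, a driver page reaches its migratepage()
callback through the same dispatch as any other movable page. As a rough
sketch of the generic path in mm/migrate.c (simplified from
move_to_new_page(); details vary between kernel versions):

static int move_to_new_page(struct page *newpage, struct page *page,
			    enum migrate_mode mode)
{
	struct address_space *mapping = page_mapping(page);
	int rc;

	if (!mapping)
		rc = migrate_page(mapping, newpage, page, mode);
	else if (mapping->a_ops->migratepage)
		/*
		 * Balloon and driver pages take this branch: their
		 * address_space supplies a migratepage() callback.
		 */
		rc = mapping->a_ops->migratepage(mapping, newpage,
						 page, mode);
	else
		rc = fallback_migrate_page(mapping, newpage, page, mode);

	return rc;
}

The point of the series is that mapping->a_ops now carries everything
migration needs, so no page-type-specific branches remain in
__unmap_and_move().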