diff --git a/mm/migrate.c b/mm/migrate.c
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2136,7 +2136,7 @@ static int migrate_vma_collect_hole(unsigned long start,
struct migrate_vma *migrate = walk->private;
unsigned long addr;
- for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
migrate->dst[migrate->npages] = 0;
migrate->npages++;
@@ -2153,7 +2153,7 @@ static int migrate_vma_collect_skip(unsigned long start,
struct migrate_vma *migrate = walk->private;
unsigned long addr;
- for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
migrate->dst[migrate->npages] = 0;
migrate->src[migrate->npages++] = 0;
}
Addresses passed to walk_page_range() callback functions are already
page aligned and don't need to be masked with PAGE_MASK.

Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
---
 mm/migrate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
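
For anyone who wants to double-check the reasoning, below is a minimal
userspace sketch (illustration only, not kernel code; it assumes 4 KiB
pages and defines PAGE_SHIFT/PAGE_SIZE/PAGE_MASK locally to mirror the
kernel's definitions) showing that masking is a no-op on an address
that is already page aligned and only rounds down unaligned ones:

#include <assert.h>
#include <stdio.h>

/* Local stand-ins for the kernel macros; 4 KiB pages assumed. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long aligned   = 0x7f3a00042000UL;	/* low 12 bits clear */
	unsigned long unaligned = 0x7f3a00042abcUL;	/* low 12 bits set */

	/* An already aligned address is unchanged by the mask... */
	assert((aligned & PAGE_MASK) == aligned);

	/* ...only an unaligned address gets rounded down. */
	assert((unaligned & PAGE_MASK) == aligned);

	printf("aligned:   %#lx -> %#lx\n", aligned, aligned & PAGE_MASK);
	printf("unaligned: %#lx -> %#lx\n", unaligned, unaligned & PAGE_MASK);
	return 0;
}

Since walk_page_range() only ever invokes these callbacks with
page-aligned start/end values, the masked and unmasked loops visit
exactly the same set of pages, so the change is a pure cleanup with no
functional effect.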