@@ -5338,7 +5338,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
break;
}
ret = copy_user_large_folio(new_folio, pte_folio,
- ALIGN_DOWN(addr, sz), dst_vma);
+ addr, dst_vma);
folio_put(pte_folio);
if (ret) {
folio_put(new_folio);
@@ -6637,8 +6637,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
*foliop = NULL;
goto out;
}
- ret = copy_user_large_folio(folio, *foliop,
- ALIGN_DOWN(dst_addr, size), dst_vma);
+ ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
folio_put(*foliop);
*foliop = NULL;
if (ret) {
@@ -6841,6 +6841,7 @@ static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
struct page *dst_page;
struct page *src_page;

+ addr = ALIGN_DOWN(addr, folio_size(dst));
for (i = 0; i < nr_pages; i++) {
dst_page = folio_page(dst, i);
src_page = folio_page(src, i);
When copying a gigantic page, copy_user_gigantic_page() walks every subpage from the first to the last, so it needs the base address aligned to the folio size. Do the alignment inside copy_user_gigantic_page() and stop aligning the address down in the callers, because the real faulting address must be passed unmodified to process_huge_page().

Fixes: 530dd9926dc1 ("mm: memory: improve copy_user_large_folio()")
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/hugetlb.c | 5 ++---
 mm/memory.c  | 1 +
 2 files changed, 3 insertions(+), 3 deletions(-)
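For illustration only, a condensed sketch of the two copy paths involved (simplified from mm/memory.c around this change, not the exact upstream code; copy_subpage and copy_subpage_arg are helper names assumed from that file and their bodies are omitted here):

/*
 * Illustrative sketch only -- condensed from mm/memory.c, bodies abbreviated.
 */
static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
				   unsigned long addr,
				   struct vm_area_struct *vma,
				   unsigned int nr_pages)
{
	struct page *dst_page;
	struct page *src_page;
	int i;

	/* The loop walks every subpage from the folio base, so align here. */
	addr = ALIGN_DOWN(addr, folio_size(dst));

	for (i = 0; i < nr_pages; i++) {
		dst_page = folio_page(dst, i);
		src_page = folio_page(src, i);

		cond_resched();
		if (copy_mc_user_highpage(dst_page, src_page,
					  addr + i * PAGE_SIZE, vma))
			return -EHWPOISON;
	}
	return 0;
}

int copy_user_large_folio(struct folio *dst, struct folio *src,
			  unsigned long addr_hint, struct vm_area_struct *vma)
{
	unsigned int nr_pages = folio_nr_pages(dst);
	struct copy_subpage_arg arg = {
		.dst = dst,
		.src = src,
		.vma = vma,
	};

	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);

	/*
	 * process_huge_page() uses the real faulting address to copy the
	 * subpage around the fault last and keep it cache-hot, which is why
	 * the hugetlb callers now pass addr/dst_addr down unmodified instead
	 * of aligning it first.
	 */
	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
}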