@@ -3678,10 +3678,10 @@ extern const struct attribute_group memory_failure_attr_group;
extern void clear_huge_page(struct page *page,
unsigned long addr_hint,
unsigned int pages_per_huge_page);
-extern void copy_user_huge_page(struct page *dst, struct page *src,
- unsigned long addr_hint,
- struct vm_area_struct *vma,
- unsigned int pages_per_huge_page);
+void copy_user_folio(struct folio *dst, struct folio *src,
+ unsigned long addr_hint,
+ struct vm_area_struct *vma,
+ unsigned int pages_per_huge_page);
long copy_folio_from_user(struct folio *dst_folio,
const void __user *usr_src,
bool allow_pagefault);
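The new prototype also drops the extern, as recent mm declarations tend to do. Callers that still hold a raw struct page cross the boundary with page_folio(); as a simplified sketch (the real definition is a _Generic-based macro in the mm headers, not this hypothetical helper), it resolves the page to its compound head:

static inline struct folio *page_folio_sketch(struct page *page)
{
	/*
	 * A folio is never a tail page, so resolving to the compound
	 * head yields the containing folio.
	 */
	return (struct folio *)compound_head(page);
}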
@@ -5097,8 +5097,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
ret = PTR_ERR(new_folio);
break;
}
- copy_user_huge_page(&new_folio->page, ptepage, addr, dst_vma,
- npages);
+ copy_user_folio(new_folio, page_folio(ptepage), addr, dst_vma,
+ npages);
put_page(ptepage);

/* Install the new hugetlb folio if src pte stable */
@@ -5616,8 +5616,8 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
goto out_release_all;
}

- copy_user_huge_page(&new_folio->page, old_page, address, vma,
- pages_per_huge_page(h));
+ copy_user_folio(new_folio, page_folio(old_page), address, vma,
+ pages_per_huge_page(h));
__folio_mark_uptodate(new_folio);

mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
@@ -6260,8 +6260,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
*foliop = NULL;
goto out;
}
- copy_user_huge_page(&folio->page, &(*foliop)->page, dst_addr, dst_vma,
- pages_per_huge_page(h));
+ copy_user_folio(folio, *foliop, dst_addr, dst_vma,
+ pages_per_huge_page(h));
folio_put(*foliop);
*foliop = NULL;
}
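Taken together, the three call-site hunks follow one rule: a caller still holding a raw struct page (ptepage, old_page) wraps it with page_folio(), while a caller that already has folios (*foliop) passes them straight through, dropping the old &folio->page unwrapping. In sketch form, with hypothetical local names:

/* Caller holding a raw struct page: convert at the call boundary. */
copy_user_folio(dst_folio, page_folio(src_page), addr, vma, npages);

/* Caller already holding folios: no conversion in either direction. */
copy_user_folio(dst_folio, src_folio, addr, vma, npages);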
@@ -5801,21 +5801,21 @@ void clear_huge_page(struct page *page,
process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
}

-static void copy_user_gigantic_page(struct page *dst, struct page *src,
- unsigned long addr,
- struct vm_area_struct *vma,
- unsigned int pages_per_huge_page)
+static void copy_user_gigantic_page(struct folio *dst, struct folio *src,
+ unsigned long addr,
+ struct vm_area_struct *vma,
+ unsigned int pages_per_huge_page)
{
int i;
- struct page *dst_base = dst;
- struct page *src_base = src;
+ struct page *dst_page;
+ struct page *src_page;

for (i = 0; i < pages_per_huge_page; i++) {
- dst = nth_page(dst_base, i);
- src = nth_page(src_base, i);
+ dst_page = folio_page(dst, i);
+ src_page = folio_page(src, i);

cond_resched();
- copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+ copy_user_highpage(dst_page, src_page, addr + i*PAGE_SIZE, vma);
}
}
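The loop body is behaviourally identical: folio_page() is built on nth_page() against the folio's head page, and &folio->page is exactly what the deleted dst_base/src_base pointers held, so the same i-th subpage is copied before and after. A shape-only sketch of the identity the conversion relies on (check the mm headers for the exact macro):

#define folio_page(folio, n)	nth_page(&(folio)->page, n)

/* hence, inside the loop: */
/* folio_page(dst, i) == nth_page(&dst->page, i) == nth_page(dst_base, i) */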
@@ -5833,15 +5833,15 @@ static void copy_subpage(unsigned long addr, int idx, void *arg)
addr, copy_arg->vma);
}

-void copy_user_huge_page(struct page *dst, struct page *src,
- unsigned long addr_hint, struct vm_area_struct *vma,
- unsigned int pages_per_huge_page)
+void copy_user_folio(struct folio *dst, struct folio *src,
+ unsigned long addr_hint, struct vm_area_struct *vma,
+ unsigned int pages_per_huge_page)
{
unsigned long addr = addr_hint &
~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
struct copy_subpage_arg arg = {
- .dst = dst,
- .src = src,
+ .dst = &dst->page,
+ .src = &src->page,
.vma = vma,
};
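Note that copy_subpage_arg stays page-based on purpose: process_huge_page() drives a per-subpage callback, and copy_subpage() (its tail is visible at the top of this hunk) works from these base page pointers, so the folios are unwrapped exactly once when the arg is built. A simplified sketch of that callback shape, with the indexing spelled out via nth_page() for clarity (the real body may index differently):

static void copy_subpage(unsigned long addr, int idx, void *arg)
{
	struct copy_subpage_arg *copy_arg = arg;

	/* idx is the subpage offset from the head page of the folio. */
	copy_user_highpage(nth_page(copy_arg->dst, idx),
			   nth_page(copy_arg->src, idx),
			   addr, copy_arg->vma);
}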