Message ID | 20240613105344.2876119-1-wangkefeng.wang@huawei.com (mailing list archive) |
---|---|
State | New |
Series | [1/2] mm: convert clear_huge_page() to clear_large_folio() |
On Thu, Jun 13, 2024 at 06:53:43PM +0800, Kefeng Wang wrote:
> +++ b/include/linux/mm.h
> @@ -4071,9 +4071,7 @@ enum mf_action_page_type {
> };
>
> #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
> -extern void clear_huge_page(struct page *page,
> -			    unsigned long addr_hint,
> -			    unsigned int pages_per_huge_page);
> +void clear_large_folio(struct folio *folio, unsigned long addr_hint);

I think this is properly called

	void folio_zero_user(struct folio *folio, unsigned long user_addr);

> -void clear_huge_page(struct page *page,
> -		     unsigned long addr_hint, unsigned int pages_per_huge_page)
> +void clear_large_folio(struct folio *folio, unsigned long addr_hint)
> {
> +	unsigned int pages_per_huge_page = folio_nr_pages(folio);

I think it's worth renaming to nr_pages here.

> 	unsigned long addr = addr_hint &
> 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);

This can just be:

	unsigned long addr = user_addr & ~(folio_size(folio) - 1);

Umm.  Except this assumes that the folio is always mapped to userspace
at its natural alignment.  And that's true for hugetlb, but not true
for THP.  So I think this needs to be moved into the callers for which
it is true, and we need documentation that user_addr is expected to be
the base address that the folio is mapped at.
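[To make the alignment point concrete, here is a minimal standalone
sketch, not part of the patch: plain userspace C, with the 2 MiB folio
size and both addresses invented for illustration. Masking with
~(folio_size(folio) - 1) only recovers the mapping base when the folio
is mapped at its natural alignment, which hugetlb guarantees but THP
does not.]

	#include <stdio.h>

	int main(void)
	{
		/* stand-in for folio_size(folio); 2 MiB is just an example */
		unsigned long size = 2UL << 20;
		unsigned long base, fault;

		/* hugetlb-style: base is naturally aligned, the mask works */
		base = 0x7f0000200000UL;
		fault = base + 0x1234;
		printf("aligned:   base %#lx, mask gives %#lx\n",
		       base, fault & ~(size - 1));

		/* folio mapped without natural alignment: the mask misfires */
		base = 0x7f0000210000UL;
		fault = base + 0x1234;
		printf("unaligned: base %#lx, mask gives %#lx\n",
		       base, fault & ~(size - 1));
		return 0;
	}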
On 2024/6/13 22:51, Matthew Wilcox wrote:
> On Thu, Jun 13, 2024 at 06:53:43PM +0800, Kefeng Wang wrote:
>> +++ b/include/linux/mm.h
>> @@ -4071,9 +4071,7 @@ enum mf_action_page_type {
>> };
>>
>> #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
>> -extern void clear_huge_page(struct page *page,
>> -			    unsigned long addr_hint,
>> -			    unsigned int pages_per_huge_page);
>> +void clear_large_folio(struct folio *folio, unsigned long addr_hint);
>
> I think this is properly called
>
> 	void folio_zero_user(struct folio *folio, unsigned long user_addr);

OK,

>
>> -void clear_huge_page(struct page *page,
>> -		     unsigned long addr_hint, unsigned int pages_per_huge_page)
>> +void clear_large_folio(struct folio *folio, unsigned long addr_hint)
>> {
>> +	unsigned int pages_per_huge_page = folio_nr_pages(folio);
>
> I think it's worth renaming to nr_pages here.
>
>> 	unsigned long addr = addr_hint &
>> 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
>
> This can just be:
>
> 	unsigned long addr = user_addr & ~(folio_size(folio) - 1);
>
> Umm.  Except this assumes that the folio is always mapped to userspace
> at its natural alignment.  And that's true for hugetlb, but not true
> for THP.  So I think this needs to be moved into the callers for which
> it is true, and we need documentation that user_addr is expected to be
> the base address that the folio is mapped at.
>

Agreed, it's better to move it into the callers. One more question,
though: should user_addr be the address the application will actually
access, falling back to the base address only when we can't know it?
If so, then even for hugetlb we should pass vmf->real_address in
hugetlb_no_page(). Also Cc'ing Huang Ying; please correct me if I'm
wrong. Thanks.
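[Background on why the faulting address is worth passing even when the
base could be derived: process_huge_page(), used by this function,
orders the per-page clearing so that the faulting subpage is cleared
last and its cache lines are still hot when userspace resumes. A
minimal userspace sketch of just that target-last property follows; the
kernel's actual traversal is more elaborate, and the page counts here
are invented.]

	#include <stdio.h>

	/* clear every subpage except the target first, then the target,
	 * so the target's cache lines are the freshest on return */
	static void clear_target_last(int nr_pages, int target)
	{
		int i;

		for (i = 0; i < nr_pages; i++)
			if (i != target)
				printf("clear subpage %d\n", i);
		printf("clear subpage %d (faulting page, cleared last)\n",
		       target);
	}

	int main(void)
	{
		clear_target_last(8, 5);	/* e.g. the fault hit subpage 5 */
		return 0;
	}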
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 986c1df63361..0e71ee8fee4a 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -893,7 +893,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 			error = PTR_ERR(folio);
 			goto out;
 		}
-		clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
+		clear_large_folio(folio, addr);
 		__folio_mark_uptodate(folio);
 		error = hugetlb_add_to_page_cache(folio, mapping, index);
 		if (unlikely(error)) {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 106bb0310352..4c5b20ee1106 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4071,9 +4071,7 @@ enum mf_action_page_type {
 };
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-extern void clear_huge_page(struct page *page,
-			    unsigned long addr_hint,
-			    unsigned int pages_per_huge_page);
+void clear_large_folio(struct folio *folio, unsigned long addr_hint);
 int copy_user_large_folio(struct folio *dst, struct folio *src,
 			  unsigned long addr_hint,
 			  struct vm_area_struct *vma);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f409ea9fcc18..0a33eda80790 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -943,10 +943,10 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 		goto release;
 	}
 
-	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
+	clear_large_folio(folio, vmf->address);
 	/*
 	 * The memory barrier inside __folio_mark_uptodate makes sure that
-	 * clear_huge_page writes become visible before the set_pmd_at()
+	 * clear_large_folio writes become visible before the set_pmd_at()
 	 * write.
 	 */
 	__folio_mark_uptodate(folio);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3518321f6598..99d8cd0f7f11 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6296,8 +6296,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 			ret = 0;
 			goto out;
 		}
-		clear_huge_page(&folio->page, vmf->real_address,
-				pages_per_huge_page(h));
+		clear_large_folio(folio, vmf->real_address);
 		__folio_mark_uptodate(folio);
 		new_folio = true;
 
diff --git a/mm/memory.c b/mm/memory.c
index 3f11664590d2..6ef84cd0b2bf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4488,7 +4488,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 				goto next;
 			}
 			folio_throttle_swaprate(folio, gfp);
-			clear_huge_page(&folio->page, vmf->address, 1 << order);
+			clear_large_folio(folio, vmf->address);
 			return folio;
 		}
 next:
@@ -6419,41 +6419,38 @@ static inline int process_huge_page(
 	return 0;
 }
 
-static void clear_gigantic_page(struct page *page,
-				unsigned long addr,
+static void clear_gigantic_page(struct folio *folio, unsigned long addr,
 				unsigned int pages_per_huge_page)
 {
 	int i;
-	struct page *p;
 
 	might_sleep();
 	for (i = 0; i < pages_per_huge_page; i++) {
-		p = nth_page(page, i);
 		cond_resched();
-		clear_user_highpage(p, addr + i * PAGE_SIZE);
+		clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
 	}
 }
 
 static int clear_subpage(unsigned long addr, int idx, void *arg)
 {
-	struct page *page = arg;
+	struct folio *folio = arg;
 
-	clear_user_highpage(nth_page(page, idx), addr);
+	clear_user_highpage(folio_page(folio, idx), addr);
 	return 0;
 }
 
-void clear_huge_page(struct page *page,
-		     unsigned long addr_hint, unsigned int pages_per_huge_page)
+void clear_large_folio(struct folio *folio, unsigned long addr_hint)
 {
+	unsigned int pages_per_huge_page = folio_nr_pages(folio);
 	unsigned long addr = addr_hint &
 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
 
 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
-		clear_gigantic_page(page, addr, pages_per_huge_page);
+		clear_gigantic_page(folio, addr, pages_per_huge_page);
 		return;
 	}
 
-	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
+	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, folio);
 }
 
 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
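[A rough userspace analogue of the clear_gigantic_page() shape kept by
the patch, not part of it: the region is zeroed one page at a time with
a scheduling point between pages instead of one long memset(), so a
gigantic clear cannot hog the CPU. sched_yield() stands in for
cond_resched(); all sizes are invented for illustration.]

	#include <sched.h>
	#include <stdlib.h>
	#include <string.h>

	#define PAGE_SZ 4096UL

	/* zero nr_pages pages, yielding between pages the way the
	 * kernel yields with cond_resched() */
	static void clear_big_region(char *base, unsigned long nr_pages)
	{
		unsigned long i;

		for (i = 0; i < nr_pages; i++) {
			sched_yield();
			memset(base + i * PAGE_SZ, 0, PAGE_SZ);
		}
	}

	int main(void)
	{
		unsigned long nr = 512;	/* e.g. a 2 MiB region */
		char *buf = aligned_alloc(PAGE_SZ, nr * PAGE_SZ);

		if (buf)
			clear_big_region(buf, nr);
		free(buf);
		return 0;
	}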
Replace clear_huge_page() with clear_large_folio(), which takes a folio
instead of a page. The number of pages is obtained directly with
folio_nr_pages(), which removes the pages_per_huge_page argument.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 fs/hugetlbfs/inode.c |  2 +-
 include/linux/mm.h   |  4 +---
 mm/huge_memory.c     |  4 ++--
 mm/hugetlb.c         |  3 +--
 mm/memory.c          | 21 +++++++++------------
 5 files changed, 14 insertions(+), 20 deletions(-)