diff mbox series

[v5,5/6] mm: convert copy_user_huge_page() to copy_user_folio()

Message ID 20230331093937.945725-6-zhangpeng362@huawei.com (mailing list archive)
State New
Headers show
Series userfaultfd: convert userfaultfd functions to use folios | expand

Commit Message

Peng Zhang March 31, 2023, 9:39 a.m. UTC
From: ZhangPeng <zhangpeng362@huawei.com>

Replace copy_user_huge_page() with copy_user_folio(). copy_user_folio()
does the same as copy_user_huge_page(), but takes in folios instead of
pages. Convert copy_user_gigantic_page() to take in folios.
Remove pages_per_huge_page from copy_user_folio(), because we can get
that from folio_nr_pages(dst).

Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
---
 include/linux/mm.h |  7 +++----
 mm/hugetlb.c       | 10 ++++------
 mm/memory.c        | 28 ++++++++++++++--------------
 3 files changed, 21 insertions(+), 24 deletions(-)

Comments

Mike Kravetz April 6, 2023, 11:55 p.m. UTC | #1
On 03/31/23 17:39, Peng Zhang wrote:
> From: ZhangPeng <zhangpeng362@huawei.com>
> 
> Replace copy_user_huge_page() with copy_user_folio(). copy_user_folio()
> does the same as copy_user_huge_page(), but takes in folios instead of
> pages. Convert copy_user_gigantic_page() to take in folios.
> Remove pages_per_huge_page from copy_user_folio(), because we can get
> that from folio_nr_pages(dst).
> 
> Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
> ---
>  include/linux/mm.h |  7 +++----
>  mm/hugetlb.c       | 10 ++++------
>  mm/memory.c        | 28 ++++++++++++++--------------
>  3 files changed, 21 insertions(+), 24 deletions(-)

No technical problems with the patch, but ...
>  
> @@ -5847,15 +5847,15 @@ static void copy_subpage(unsigned long addr, int idx, void *arg)
>  			   addr, copy_arg->vma);
>  }
>  
> -void copy_user_huge_page(struct page *dst, struct page *src,
> -			 unsigned long addr_hint, struct vm_area_struct *vma,
> -			 unsigned int pages_per_huge_page)
> +void copy_user_folio(struct folio *dst, struct folio *src,
> +		      unsigned long addr_hint, struct vm_area_struct *vma)
>  {
> +	unsigned int pages_per_huge_page = folio_nr_pages(dst);
>  	unsigned long addr = addr_hint &
>  		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
>  	struct copy_subpage_arg arg = {
> -		.dst = dst,
> -		.src = src,
> +		.dst = &dst->page,
> +		.src = &src->page,
>  		.vma = vma,
>  	};
>  

I seem to recall that Matthew suggested changing the function name to
copy_user_folio.  My only concern is that the name now sounds like a
general purpose routine for copying folios.  It certainly would work
for a single page folio, but there is a bunch of unnecessary overhead
in that case.

That makes me think there should perhaps be an optimized path for single
page folios that just does copy_user_highpage().  But, the argument addr_hint
does not make much sense in the single page folio case.  So, I am not
sure if I agree with leaving large/huge out of the function name.

Just wondering if Matthew has any additional thoughts?
Peng Zhang April 8, 2023, 4:42 a.m. UTC | #2
On 2023/4/7 7:55, Mike Kravetz wrote:

> On 03/31/23 17:39, Peng Zhang wrote:
>> From: ZhangPeng <zhangpeng362@huawei.com>
>>
>> Replace copy_user_huge_page() with copy_user_folio(). copy_user_folio()
>> does the same as copy_user_huge_page(), but takes in folios instead of
>> pages. Convert copy_user_gigantic_page() to take in folios.
>> Remove pages_per_huge_page from copy_user_folio(), because we can get
>> that from folio_nr_pages(dst).
>>
>> Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
>> ---
>>   include/linux/mm.h |  7 +++----
>>   mm/hugetlb.c       | 10 ++++------
>>   mm/memory.c        | 28 ++++++++++++++--------------
>>   3 files changed, 21 insertions(+), 24 deletions(-)
> No technical problems with the patch, but ...
>>   
>> @@ -5847,15 +5847,15 @@ static void copy_subpage(unsigned long addr, int idx, void *arg)
>>   			   addr, copy_arg->vma);
>>   }
>>   
>> -void copy_user_huge_page(struct page *dst, struct page *src,
>> -			 unsigned long addr_hint, struct vm_area_struct *vma,
>> -			 unsigned int pages_per_huge_page)
>> +void copy_user_folio(struct folio *dst, struct folio *src,
>> +		      unsigned long addr_hint, struct vm_area_struct *vma)
>>   {
>> +	unsigned int pages_per_huge_page = folio_nr_pages(dst);
>>   	unsigned long addr = addr_hint &
>>   		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
>>   	struct copy_subpage_arg arg = {
>> -		.dst = dst,
>> -		.src = src,
>> +		.dst = &dst->page,
>> +		.src = &src->page,
>>   		.vma = vma,
>>   	};
>>   
> I seem to recall that Matthew suggested changing the function name to
> copy_user_folio.  My only concern is that the name now sounds like a
> general purpose routine for copying folios.  It certainly would work
> for a single page folio, but there is a bunch of unnecessary overhead
> in that case.
>
> That makes me think there should perhaps be an optimized path for single
> page folios that just does copy_user_highpage().  But, the argument addr_hint
> does not make much sense in the single page folio case.  So, I am not
> sure if I agree with leaving large/huge out of the function name.
>
> Just wondering if Matthew has any additional thoughts?

Agreed. In my opinion, it's better to leave large/huge out of the
function name.
I am also wondering whether Matthew has any additional thoughts on this.

Best Regards,
Peng
diff mbox series

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index cf4d773ca506..898ece0a3802 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3678,10 +3678,9 @@  extern const struct attribute_group memory_failure_attr_group;
 extern void clear_huge_page(struct page *page,
 			    unsigned long addr_hint,
 			    unsigned int pages_per_huge_page);
-extern void copy_user_huge_page(struct page *dst, struct page *src,
-				unsigned long addr_hint,
-				struct vm_area_struct *vma,
-				unsigned int pages_per_huge_page);
+void copy_user_folio(struct folio *dst, struct folio *src,
+		      unsigned long addr_hint,
+		      struct vm_area_struct *vma);
 long copy_folio_from_user(struct folio *dst_folio,
 			   const void __user *usr_src,
 			   bool allow_pagefault);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c88f856ec2e2..a7ed17cbc84e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5097,8 +5097,8 @@  int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 					ret = PTR_ERR(new_folio);
 					break;
 				}
-				copy_user_huge_page(&new_folio->page, ptepage, addr, dst_vma,
-						    npages);
+				copy_user_folio(new_folio, page_folio(ptepage),
+						addr, dst_vma);
 				put_page(ptepage);
 
 				/* Install the new hugetlb folio if src pte stable */
@@ -5616,8 +5616,7 @@  static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_release_all;
 	}
 
-	copy_user_huge_page(&new_folio->page, old_page, address, vma,
-			    pages_per_huge_page(h));
+	copy_user_folio(new_folio, page_folio(old_page), address, vma);
 	__folio_mark_uptodate(new_folio);
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
@@ -6260,8 +6259,7 @@  int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			*foliop = NULL;
 			goto out;
 		}
-		copy_user_huge_page(&folio->page, &(*foliop)->page, dst_addr, dst_vma,
-				    pages_per_huge_page(h));
+		copy_user_folio(folio, *foliop, dst_addr, dst_vma);
 		folio_put(*foliop);
 		*foliop = NULL;
 	}
diff --git a/mm/memory.c b/mm/memory.c
index 4976422b6979..0a5cefea9774 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5815,21 +5815,21 @@  void clear_huge_page(struct page *page,
 	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
 }
 
-static void copy_user_gigantic_page(struct page *dst, struct page *src,
-				    unsigned long addr,
-				    struct vm_area_struct *vma,
-				    unsigned int pages_per_huge_page)
+static void copy_user_gigantic_page(struct folio *dst, struct folio *src,
+				     unsigned long addr,
+				     struct vm_area_struct *vma,
+				     unsigned int pages_per_huge_page)
 {
 	int i;
-	struct page *dst_base = dst;
-	struct page *src_base = src;
+	struct page *dst_page;
+	struct page *src_page;
 
 	for (i = 0; i < pages_per_huge_page; i++) {
-		dst = nth_page(dst_base, i);
-		src = nth_page(src_base, i);
+		dst_page = folio_page(dst, i);
+		src_page = folio_page(src, i);
 
 		cond_resched();
-		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+		copy_user_highpage(dst_page, src_page, addr + i*PAGE_SIZE, vma);
 	}
 }
 
@@ -5847,15 +5847,15 @@  static void copy_subpage(unsigned long addr, int idx, void *arg)
 			   addr, copy_arg->vma);
 }
 
-void copy_user_huge_page(struct page *dst, struct page *src,
-			 unsigned long addr_hint, struct vm_area_struct *vma,
-			 unsigned int pages_per_huge_page)
+void copy_user_folio(struct folio *dst, struct folio *src,
+		      unsigned long addr_hint, struct vm_area_struct *vma)
 {
+	unsigned int pages_per_huge_page = folio_nr_pages(dst);
 	unsigned long addr = addr_hint &
 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
 	struct copy_subpage_arg arg = {
-		.dst = dst,
-		.src = src,
+		.dst = &dst->page,
+		.src = &src->page,
 		.vma = vma,
 	};