
[2/3] mm/hugetlb: Refactor unmap_hugepage_range() to take folio instead of page

Message ID 20250417155530.124073-2-nifan.cxl@gmail.com (mailing list archive)
State New
Series [1/3] mm/hugetlb: Refactor unmap_ref_private() to take folio instead of page

Commit Message

Fan Ni April 17, 2025, 3:43 p.m. UTC
From: Fan Ni <fan.ni@samsung.com>

The function unmap_hugepage_range() has two kinds of users:
1) unmap_ref_private(), which passes in the head page of a folio.  Since
   unmap_ref_private() already takes a folio, and the folio has no other
   use in that function, it is natural for unmap_hugepage_range() to
   take a folio as well.
2) All other users, which pass in a NULL pointer.

In both cases, we can pass in a folio. Refactor unmap_hugepage_range() to
take a folio.

Signed-off-by: Fan Ni <fan.ni@samsung.com>
---
 include/linux/hugetlb.h | 2 +-
 mm/hugetlb.c            | 7 ++++---
 2 files changed, 5 insertions(+), 4 deletions(-)
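
For readers less familiar with the ongoing folio conversion, the shape of
this refactor can be sketched as a small stand-in program. Everything below
is illustrative only: struct page and struct folio are reduced stand-ins,
and unmap_range_old()/unmap_range_new() are hypothetical names, not the
kernel functions.

/*
 * Userspace sketch of the refactor: the callee now receives the folio
 * (or NULL) and derives the head page itself, instead of every caller
 * unwrapping the folio at the call site.
 */
#include <stdio.h>

struct page { unsigned long flags; };
struct folio { struct page page; };	/* head page is the first member */

/* Before: callers passed the head page explicitly. */
static void unmap_range_old(struct page *ref_page)
{
	printf("old: ref_page=%p\n", (void *)ref_page);
}

/* After: callers pass the folio; the head page is derived internally.
 * The patch uses folio_page(ref_folio, 0) for this; an explicit NULL
 * guard is used here only to keep the stand-in well defined. */
static void unmap_range_new(struct folio *ref_folio)
{
	struct page *ref_page = ref_folio ? &ref_folio->page : NULL;

	printf("new: ref_page=%p\n", (void *)ref_page);
}

int main(void)
{
	struct folio f = { { 0 } };

	unmap_range_old(&f.page);	/* caller unwraps the folio */
	unmap_range_new(&f);		/* callee unwraps the folio */
	unmap_range_new(NULL);		/* the "all other users" case */
	return 0;
}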

Comments

Sidhartha Kumar April 17, 2025, 4:13 p.m. UTC | #1
On 4/17/25 11:43 AM, nifan.cxl@gmail.com wrote:
> From: Fan Ni <fan.ni@samsung.com>
> 
> The function unmap_hugepage_range() has two kinds of users:
> 1) unmap_ref_private(), which passes in the head page of a folio.  Since
>     unmap_ref_private() already takes a folio, and the folio has no other
>     use in that function, it is natural for unmap_hugepage_range() to
>     take a folio as well.
> 2) All other users, which pass in a NULL pointer.
> 
> In both cases, we can pass in a folio. Refactor unmap_hugepage_range() to
> take a folio.
> 
> Signed-off-by: Fan Ni <fan.ni@samsung.com>
> ---
>   include/linux/hugetlb.h | 2 +-
>   mm/hugetlb.c            | 7 ++++---
>   2 files changed, 5 insertions(+), 4 deletions(-)
> 
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index a57bed83c657..b7699f35c87f 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -128,7 +128,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
>   int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
>   			    struct vm_area_struct *, struct vm_area_struct *);
>   void unmap_hugepage_range(struct vm_area_struct *,
> -			  unsigned long, unsigned long, struct page *,
> +			  unsigned long, unsigned long, struct folio *folio,
>   			  zap_flags_t);
>   void __unmap_hugepage_range(struct mmu_gather *tlb,
>   			  struct vm_area_struct *vma,
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index b5d1ac8290a7..3181dbe0c4bb 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -6039,7 +6039,7 @@ void __hugetlb_zap_end(struct vm_area_struct *vma,
>   }
>   
>   void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
> -			  unsigned long end, struct page *ref_page,
> +			  unsigned long end, struct folio *ref_folio,
>   			  zap_flags_t zap_flags)
>   {
>   	struct mmu_notifier_range range;
> @@ -6051,7 +6051,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
>   	mmu_notifier_invalidate_range_start(&range);
>   	tlb_gather_mmu(&tlb, vma->vm_mm);
>   
> -	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
> +	__unmap_hugepage_range(&tlb, vma, start, end,
> +			       folio_page(ref_folio, 0), zap_flags);
>   
>   	mmu_notifier_invalidate_range_end(&range);
>   	tlb_finish_mmu(&tlb);
> @@ -6109,7 +6110,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
>   		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
>   			unmap_hugepage_range(iter_vma, address,
>   					     address + huge_page_size(h),
> -					     folio_page(folio, 0), 0);
> +					     folio, 0);
>   	}
>   	i_mmap_unlock_write(mapping);
>   }

Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>

Muchun Song April 18, 2025, 2:51 a.m. UTC | #2
> On Apr 17, 2025, at 23:43, nifan.cxl@gmail.com wrote:
> 
> From: Fan Ni <fan.ni@samsung.com>
> 
> The function unmap_hugepage_range() has two kinds of users:
> 1) unmap_ref_private(), which passes in the head page of a folio.  Since
>   unmap_ref_private() already takes a folio, and the folio has no other
>   use in that function, it is natural for unmap_hugepage_range() to
>   take a folio as well.
> 2) All other users, which pass in a NULL pointer.
> 
> In both cases, we can pass in a folio. Refactor unmap_hugepage_range() to
> take a folio.
> 
> Signed-off-by: Fan Ni <fan.ni@samsung.com>

Reviewed-by: Muchun Song <muchun.song@linux.dev>

Thanks.

Patch

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index a57bed83c657..b7699f35c87f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -128,7 +128,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
 			    struct vm_area_struct *, struct vm_area_struct *);
 void unmap_hugepage_range(struct vm_area_struct *,
-			  unsigned long, unsigned long, struct page *,
+			  unsigned long, unsigned long, struct folio *folio,
 			  zap_flags_t);
 void __unmap_hugepage_range(struct mmu_gather *tlb,
 			  struct vm_area_struct *vma,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b5d1ac8290a7..3181dbe0c4bb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6039,7 +6039,7 @@ void __hugetlb_zap_end(struct vm_area_struct *vma,
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
-			  unsigned long end, struct page *ref_page,
+			  unsigned long end, struct folio *ref_folio,
 			  zap_flags_t zap_flags)
 {
 	struct mmu_notifier_range range;
@@ -6051,7 +6051,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	mmu_notifier_invalidate_range_start(&range);
 	tlb_gather_mmu(&tlb, vma->vm_mm);
 
-	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
+	__unmap_hugepage_range(&tlb, vma, start, end,
+			       folio_page(ref_folio, 0), zap_flags);
 
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb);
@@ -6109,7 +6110,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
 			unmap_hugepage_range(iter_vma, address,
 					     address + huge_page_size(h),
-					     folio_page(folio, 0), 0);
+					     folio, 0);
 	}
 	i_mmap_unlock_write(mapping);
 }
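
To make the caller-side effect concrete, here are the two caller shapes from
the commit message reduced to another stand-in program.
unmap_hugepage_range_stub() is a hypothetical stand-in with the post-patch
parameter shape, and the addresses are arbitrary:

#include <stddef.h>

struct page { unsigned long flags; };
struct folio { struct page page; };

/* Stub with the post-patch parameter types: range bounds, a reference
 * folio (or NULL), and zap flags. */
static void unmap_hugepage_range_stub(unsigned long start, unsigned long end,
				      struct folio *ref_folio,
				      unsigned long zap_flags)
{
	(void)start; (void)end; (void)ref_folio; (void)zap_flags;
}

int main(void)
{
	struct folio folio = { { 0 } };

	/* Kind 1: unmap_ref_private() now passes the folio directly,
	 * where it previously passed folio_page(folio, 0). */
	unmap_hugepage_range_stub(0x200000, 0x400000, &folio, 0);

	/* Kind 2: all other callers keep passing NULL, now typed as
	 * struct folio * instead of struct page *. */
	unmap_hugepage_range_stub(0x200000, 0x400000, NULL, 0);
	return 0;
}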