
[1/3] mm/hugetlb: Refactor unmap_ref_private() to take folio instead of page

Message ID 20250417155530.124073-1-nifan.cxl@gmail.com (mailing list archive)
State New
Series [1/3] mm/hugetlb: Refactor unmap_ref_private() to take folio instead of page

Commit Message

Fan Ni April 17, 2025, 3:43 p.m. UTC
From: Fan Ni <fan.ni@samsung.com>

The function unmap_ref_private() has only one user, which passes in
&folio->page. Let it take a folio directly.

Signed-off-by: Fan Ni <fan.ni@samsung.com>
---
 mm/hugetlb.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
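The refactor relies on a folio's first page being its head page. A minimal
sketch of that equivalence follows, for illustration only; the head_page_of()
helper is hypothetical and not part of the patch:

/*
 * In current kernels folio_page() expands to roughly
 *     nth_page(&(folio)->page, n)
 * so folio_page(folio, 0) is the folio's head page, the same
 * struct page the old caller passed as &old_folio->page. The
 * page-to-folio conversion thus moves from the caller into
 * unmap_ref_private() with no change in behavior.
 */
static inline struct page *head_page_of(struct folio *folio)
{
	return folio_page(folio, 0);	/* == &folio->page */
}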

Comments

Sidhartha Kumar April 17, 2025, 4:11 p.m. UTC | #1
On 4/17/25 11:43 AM, nifan.cxl@gmail.com wrote:
> From: Fan Ni <fan.ni@samsung.com>
> 
> The function unmap_ref_private() has only one user, which passes in
> &folio->page. Let it take a folio directly.
> 
> Signed-off-by: Fan Ni <fan.ni@samsung.com>
> ---
>   mm/hugetlb.c | 8 ++++----
>   1 file changed, 4 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index ccc4f08f8481..b5d1ac8290a7 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -6064,7 +6064,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
>    * same region.
>    */
>   static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
> -			      struct page *page, unsigned long address)
> +			      struct folio *folio, unsigned long address)
>   {
>   	struct hstate *h = hstate_vma(vma);
>   	struct vm_area_struct *iter_vma;
> @@ -6108,7 +6108,8 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
>   		 */
>   		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
>   			unmap_hugepage_range(iter_vma, address,
> -					     address + huge_page_size(h), page, 0);
> +					     address + huge_page_size(h),
> +					     folio_page(folio, 0), 0);
>   	}
>   	i_mmap_unlock_write(mapping);
>   }
> @@ -6231,8 +6232,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
>   			hugetlb_vma_unlock_read(vma);
>   			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
>   
> -			unmap_ref_private(mm, vma, &old_folio->page,
> -					vmf->address);
> +			unmap_ref_private(mm, vma, old_folio, vmf->address);
>   
>   			mutex_lock(&hugetlb_fault_mutex_table[hash]);
>   			hugetlb_vma_lock_read(vma);
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Muchun Song April 18, 2025, 2:51 a.m. UTC | #2
> On Apr 17, 2025, at 23:43, nifan.cxl@gmail.com wrote:
> 
> From: Fan Ni <fan.ni@samsung.com>
> 
> The function unmap_ref_private() has only one user, which passes in
> &folio->page. Let it take a folio directly.
> 
> Signed-off-by: Fan Ni <fan.ni@samsung.com>

Reviewed-by: Muchun Song <muchun.song@linux.dev>

Thanks.

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ccc4f08f8481..b5d1ac8290a7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6064,7 +6064,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
  * same region.
  */
 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
-			      struct page *page, unsigned long address)
+			      struct folio *folio, unsigned long address)
 {
 	struct hstate *h = hstate_vma(vma);
 	struct vm_area_struct *iter_vma;
@@ -6108,7 +6108,8 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		 */
 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
 			unmap_hugepage_range(iter_vma, address,
-					     address + huge_page_size(h), page, 0);
+					     address + huge_page_size(h),
+					     folio_page(folio, 0), 0);
 	}
 	i_mmap_unlock_write(mapping);
 }
@@ -6231,8 +6232,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
 			hugetlb_vma_unlock_read(vma);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
-			unmap_ref_private(mm, vma, &old_folio->page,
-					vmf->address);
+			unmap_ref_private(mm, vma, old_folio, vmf->address);
 
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 			hugetlb_vma_lock_read(vma);