[v2,07/10] hugetlb: kill hugetlbfs_pagecache_page()

Message ID 20220901120030.63318-8-linmiaohe@huawei.com (mailing list archive)
State New
Series A few cleanup patches for hugetlb

Commit Message

Miaohe Lin Sept. 1, 2022, noon UTC
Fold hugetlbfs_pagecache_page() into its sole caller to remove some
duplicated code. No functional change intended.

Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
---
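For reference, a minimal sketch of the relevant caller context in hugetlb_fault() after this patch (heavily abbreviated; surrounding fault-handling code omitted): mapping and idx are already derived from the vma earlier in the fault path, so the open-coded find_lock_page() call is equivalent to the removed helper:

	/* Already computed earlier in hugetlb_fault(), before this patch: */
	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, haddr);
	...
	/* Was: pagecache_page = hugetlbfs_pagecache_page(h, vma, haddr); */
	pagecache_page = find_lock_page(mapping, idx);
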
 mm/hugetlb.c | 15 +--------------
 1 file changed, 1 insertion(+), 14 deletions(-)

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bb65b7fdca25..26bcc85715eb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5467,19 +5467,6 @@  static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	return ret;
 }
 
-/* Return the pagecache page at a given address within a VMA */
-static struct page *hugetlbfs_pagecache_page(struct hstate *h,
-			struct vm_area_struct *vma, unsigned long address)
-{
-	struct address_space *mapping;
-	pgoff_t idx;
-
-	mapping = vma->vm_file->f_mapping;
-	idx = vma_hugecache_offset(h, vma, address);
-
-	return find_lock_page(mapping, idx);
-}
-
 /*
  * Return whether there is a pagecache page to back given address within VMA.
  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
@@ -5885,7 +5872,7 @@  vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		/* Just decrements count, does not deallocate */
 		vma_end_reservation(h, vma, haddr);
 
-		pagecache_page = hugetlbfs_pagecache_page(h, vma, haddr);
+		pagecache_page = find_lock_page(mapping, idx);
 	}
 
 	ptl = huge_pte_lock(h, mm, ptep);