[v2,1/6] mm: memory: add vm_normal_folio_pmd()

Message ID 20230921074417.24004-2-wangkefeng.wang@huawei.com (mailing list archive)
State New
Series mm: convert numa balancing functions to use a folio

Commit Message

Kefeng Wang Sept. 21, 2023, 7:44 a.m. UTC
Add a new vm_normal_folio_pmd() wrapper, analogous to vm_normal_folio():
it lets callers replace their struct page variables entirely with
struct folio variables.
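
For illustration only, a caller could then look up the folio backing a
PMD entry directly instead of going through vm_normal_page_pmd() and
page_folio(). The helper below is a hypothetical sketch, not part of this
patch; the actual caller conversions are expected in later patches of
this series.

#include <linux/mm.h>
#include <linux/numa.h>

/*
 * Hypothetical usage sketch (not part of this patch): return the NUMA
 * node of the folio mapped by a PMD, or NUMA_NO_NODE when the PMD does
 * not map a normal page.
 */
static int example_pmd_folio_nid(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t pmd)
{
	struct folio *folio = vm_normal_folio_pmd(vma, addr, pmd);

	return folio ? folio_nid(folio) : NUMA_NO_NODE;
}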

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/mm.h |  2 ++
 mm/memory.c        | 10 ++++++++++
 2 files changed, 12 insertions(+)

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1f1d0d6b8f20..a1d0c82ac9a7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2325,6 +2325,8 @@  struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte);
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte);
+struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
+				  unsigned long addr, pmd_t pmd);
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 				pmd_t pmd);
 
diff --git a/mm/memory.c b/mm/memory.c
index 983a40f8ee62..dbc7b67eca68 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -689,6 +689,16 @@  struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 out:
 	return pfn_to_page(pfn);
 }
+
+struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
+				  unsigned long addr, pmd_t pmd)
+{
+	struct page *page = vm_normal_page_pmd(vma, addr, pmd);
+
+	if (page)
+		return page_folio(page);
+	return NULL;
+}
 #endif
 
 static void restore_exclusive_pte(struct vm_area_struct *vma,