[RFC,01/12] mm: allow per-NUMA node local PUD/PMD allocation

Message ID 20231228131056.602411-2-artem.kuzin@huawei.com (mailing list archive)
State New, archived
Series x86 NUMA-aware kernel replication

Commit Message

Artem Kuzin Dec. 28, 2023, 1:10 p.m. UTC
From: Artem Kuzin <artem.kuzin@huawei.com>

Co-developed-by: Nikita Panov <nikita.panov@huawei-partners.com>
Signed-off-by: Nikita Panov <nikita.panov@huawei-partners.com>
Co-developed-by: Alexander Grubnikov <alexander.grubnikov@huawei.com>
Signed-off-by: Alexander Grubnikov <alexander.grubnikov@huawei.com>
Signed-off-by: Artem Kuzin <artem.kuzin@huawei.com>
---
 include/asm-generic/pgalloc.h | 34 ++++++++++++++++++++++++++++++++++
 include/linux/gfp.h           |  2 ++
 mm/page_alloc.c               | 18 ++++++++++++++++++
 3 files changed, 54 insertions(+)
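
A minimal sketch of how a caller might use the new per-node page-table
allocators (not part of this patch: replicate_pud_level() and the
pud_replicas array are hypothetical names used only for illustration):

/*
 * Allocate one kernel PUD page on each online NUMA node so that every
 * node can hold its own replica of the upper page-table levels.
 * Passing &init_mm makes the helper pick GFP_PGTABLE_KERNEL.
 */
static pud_t *pud_replicas[MAX_NUMNODES];

static int __init replicate_pud_level(unsigned long addr)
{
	int nid;

	for_each_online_node(nid) {
		pud_replicas[nid] = pud_alloc_one_node(nid, &init_mm, addr);
		if (!pud_replicas[nid])
			return -ENOMEM;
	}
	return 0;
}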

Patch

diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index a7cf825befae..6364375388bf 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -132,6 +132,24 @@  static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 	}
 	return (pmd_t *)page_address(page);
 }
+
+static inline pmd_t *pmd_alloc_one_node(unsigned int nid,
+					struct mm_struct *mm, unsigned long addr)
+{
+	struct page *page;
+	gfp_t gfp = GFP_PGTABLE_USER;
+
+	if (mm == &init_mm)
+		gfp = GFP_PGTABLE_KERNEL;
+	page = alloc_pages_node(nid, gfp, 0);
+	if (!page)
+		return NULL;
+	if (!pgtable_pmd_page_ctor(page)) {
+		__free_pages(page, 0);
+		return NULL;
+	}
+	return (pmd_t *)page_address(page);
+}
 #endif
 
 #ifndef __HAVE_ARCH_PMD_FREE
@@ -156,6 +174,16 @@  static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 	return (pud_t *)get_zeroed_page(gfp);
 }
 
+static inline pud_t *__pud_alloc_one_node(unsigned int nid,
+					  struct mm_struct *mm, unsigned long addr)
+{
+	gfp_t gfp = GFP_PGTABLE_USER;
+
+	if (mm == &init_mm)
+		gfp = GFP_PGTABLE_KERNEL;
+	return (pud_t *)get_zeroed_page_node(nid, gfp);
+}
+
 #ifndef __HAVE_ARCH_PUD_ALLOC_ONE
 /**
  * pud_alloc_one - allocate a page for PUD-level page table
@@ -170,6 +198,12 @@  static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	return __pud_alloc_one(mm, addr);
 }
+
+static inline pud_t *pud_alloc_one_node(unsigned int nid,
+					struct mm_struct *mm, unsigned long addr)
+{
+	return __pud_alloc_one_node(nid, mm, addr);
+}
 #endif
 
 static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 665f06675c83..6ee0004b9774 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -288,6 +288,8 @@  static inline struct page *alloc_page_vma(gfp_t gfp,
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
+extern unsigned long __get_free_pages_node(unsigned int nid, gfp_t gfp_mask, unsigned int order);
+extern unsigned long get_zeroed_page_node(unsigned int nid, gfp_t gfp_mask);
 
 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
 void free_pages_exact(void *virt, size_t size);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7d3460c7a480..dc8f4a57d8b1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4537,6 +4537,24 @@  unsigned long get_zeroed_page(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(get_zeroed_page);
 
+unsigned long __get_free_pages_node(unsigned int nid, gfp_t gfp_mask,
+		unsigned int order)
+{
+	struct page *page;
+
+	page = alloc_pages_node(nid, gfp_mask & ~__GFP_HIGHMEM, order);
+	if (!page)
+		return 0;
+	return (unsigned long) page_address(page);
+}
+EXPORT_SYMBOL(__get_free_pages_node);
+
+unsigned long get_zeroed_page_node(unsigned int nid, gfp_t gfp_mask)
+{
+	return __get_free_pages_node(nid, gfp_mask | __GFP_ZERO, 0);
+}
+EXPORT_SYMBOL(get_zeroed_page_node);
+
 /**
  * __free_pages - Free pages allocated with alloc_pages().
  * @page: The page pointer returned from alloc_pages().
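
The two page_alloc.c helpers mirror __get_free_pages()/get_zeroed_page()
with an explicit node id, so a caller can obtain a zeroed, node-local page
by virtual address. A hedged usage example (the pgd_replica variable and
the error handling are illustrative only, not taken from the series):

	unsigned long pgd_replica;

	/* Zeroed page on node 'nid'; __GFP_ZERO is added internally. */
	pgd_replica = get_zeroed_page_node(nid, GFP_KERNEL);
	if (!pgd_replica)
		return -ENOMEM;
	/* ... fill in the replicated entries, and eventually: */
	free_pages(pgd_replica, 0);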