diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -8,6 +8,7 @@
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
+#include <linux/pte_ref.h>
#include <asm/cp15.h>
#include <asm/pgalloc.h>
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -14,6 +14,7 @@
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
+#include <linux/pte_ref.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
+#include <linux/pte_ref.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -13,6 +13,7 @@
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
+#include <linux/pte_ref.h>
#include <asm/mman.h>
#include <asm/tlb.h>
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -19,6 +19,7 @@
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
+#include <linux/pte_ref.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -18,6 +18,7 @@
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/pgtable.h>
+#include <linux/pte_ref.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -18,6 +18,7 @@
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>
+#include <linux/pte_ref.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -15,6 +15,7 @@
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
+#include <linux/pte_ref.h>
#include <asm/mman.h>
#include <asm/tlb.h>
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -11,6 +11,7 @@
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
+#include <linux/pte_ref.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2245,7 +2245,6 @@ static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);
#if defined(CONFIG_MMU)
@@ -2371,15 +2370,6 @@ static inline void pgtable_pte_page_dtor(struct page *page)
pte_unmap(pte); \
} while (0)
-#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
-
-#define pte_alloc_map(mm, pmd, address)			\
-	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
-
-#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
-	(pte_alloc(mm, pmd) ?				\
-		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
-
#define pte_alloc_kernel(pmd, address) \
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
NULL: pte_offset_kernel(pmd, address))
@@ -2471,7 +2461,6 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
return ptl;
}
-extern void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
extern void __init pagecache_init(void);
extern void __init free_area_init_memoryless_node(int nid);
extern void free_initmem(void);
diff --git a/include/linux/pte_ref.h b/include/linux/pte_ref.h
new file mode 100644
--- /dev/null
+++ b/include/linux/pte_ref.h
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Free user PTE page table pages
+ *
+ * Copyright (c) 2021, ByteDance. All rights reserved.
+ *
+ * Author: Qi Zheng <zhengqi.arch@bytedance.com>
+ */
+#ifndef _LINUX_PTE_REF_H
+#define _LINUX_PTE_REF_H
+
+#include <linux/mm.h>
+#include <linux/pgtable.h>
+#include <asm/pgalloc.h>
+
+void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
+
+#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
+
+#define pte_alloc_map(mm, pmd, address)			\
+	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
+
+#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
+	(pte_alloc(mm, pmd) ?				\
+		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
+
+#endif
+
diff --git a/mm/Makefile b/mm/Makefile
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -38,7 +38,8 @@ mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= highmem.o memory.o mincore.o \
			   mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
			   msync.o page_vma_mapped.o pagewalk.o \
-			   pgtable-generic.o rmap.o vmalloc.o ioremap.o
+			   pgtable-generic.o rmap.o vmalloc.o ioremap.o \
+			   pte_ref.o
ifdef CONFIG_CROSS_MEMORY_ATTACH
diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>
+#include <linux/pte_ref.h>
/*
* The set of flags that only affect watermark checking and reclaim
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -433,44 +433,6 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
}
}
-void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
-{
-	spinlock_t *ptl = pmd_lock(mm, pmd);
-
-	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
-		mm_inc_nr_ptes(mm);
-		/*
-		 * Ensure all pte setup (eg. pte page lock and page clearing) are
-		 * visible before the pte is made visible to other CPUs by being
-		 * put into page tables.
-		 *
-		 * The other side of the story is the pointer chasing in the page
-		 * table walking code (when walking the page table without locking;
-		 * ie. most of the time). Fortunately, these data accesses consist
-		 * of a chain of data-dependent loads, meaning most CPUs (alpha
-		 * being the notable exception) will already guarantee loads are
-		 * seen in-order. See the alpha page table accessors for the
-		 * smp_rmb() barriers in page table walking code.
-		 */
-		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
-		pmd_populate(mm, pmd, *pte);
-		*pte = NULL;
-	}
-	spin_unlock(ptl);
-}
-
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
-{
-	pgtable_t new = pte_alloc_one(mm);
-	if (!new)
-		return -ENOMEM;
-
-	pmd_install(mm, pmd, &new);
-	if (new)
-		pte_free(mm, new);
-	return 0;
-}
-
int __pte_alloc_kernel(pmd_t *pmd)
{
pte_t *new = pte_alloc_one_kernel(&init_mm);
diff --git a/mm/pte_ref.c b/mm/pte_ref.c
new file mode 100644
--- /dev/null
+++ b/mm/pte_ref.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Free user PTE page table pages
+ *
+ * Copyright (c) 2021, ByteDance. All rights reserved.
+ *
+ * Author: Qi Zheng <zhengqi.arch@bytedance.com>
+ */
+
+#include <linux/pte_ref.h>
+
+void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
+{
+	spinlock_t *ptl = pmd_lock(mm, pmd);
+
+	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
+		mm_inc_nr_ptes(mm);
+		/*
+		 * Ensure all pte setup (eg. pte page lock and page clearing) are
+		 * visible before the pte is made visible to other CPUs by being
+		 * put into page tables.
+		 *
+		 * The other side of the story is the pointer chasing in the page
+		 * table walking code (when walking the page table without locking;
+		 * ie. most of the time). Fortunately, these data accesses consist
+		 * of a chain of data-dependent loads, meaning most CPUs (alpha
+		 * being the notable exception) will already guarantee loads are
+		 * seen in-order. See the alpha page table accessors for the
+		 * smp_rmb() barriers in page table walking code.
+		 */
+		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
+		pmd_populate(mm, pmd, *pte);
+		*pte = NULL;
+	}
+	spin_unlock(ptl);
+}
+
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
+{
+	pgtable_t new = pte_alloc_one(mm);
+	if (!new)
+		return -ENOMEM;
+
+	pmd_install(mm, pmd, &new);
+	if (new)
+		pte_free(mm, new);
+	return 0;
+}
Subsequent patches will modify pte_alloc{,_map,_map_lock}(), which are the
allocation functions for user PTE page table pages, so move them to a
separate file in advance. This patch contains no functional changes; it is
only preparatory work.

Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
---
 arch/arm/mm/pgd.c             |  1 +
 arch/arm64/mm/hugetlbpage.c   |  1 +
 arch/ia64/mm/hugetlbpage.c    |  1 +
 arch/parisc/mm/hugetlbpage.c  |  1 +
 arch/powerpc/mm/hugetlbpage.c |  1 +
 arch/s390/mm/gmap.c           |  1 +
 arch/s390/mm/pgtable.c        |  1 +
 arch/sh/mm/hugetlbpage.c      |  1 +
 arch/sparc/mm/hugetlbpage.c   |  1 +
 include/linux/mm.h            | 11 ----------
 include/linux/pte_ref.h       | 29 ++++++++++++++++++++++++++
 mm/Makefile                   |  3 ++-
 mm/internal.h                 |  1 +
 mm/memory.c                   | 38 ----------------------------------
 mm/pte_ref.c                  | 48 +++++++++++++++++++++++++++++++++++++++++++
 15 files changed, 89 insertions(+), 50 deletions(-)
 create mode 100644 include/linux/pte_ref.h
 create mode 100644 mm/pte_ref.c
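For reference, here is a minimal sketch (not part of this patch) of how a caller
typically uses pte_alloc_map_lock(), which after this move is declared in
<linux/pte_ref.h> instead of <linux/mm.h>. The set_pte_example() helper and the
pre-built pte_t entry it installs are hypothetical and only illustrate the
calling convention:

#include <linux/pte_ref.h>

static int set_pte_example(struct mm_struct *mm, pmd_t *pmd,
			   unsigned long addr, pte_t entry)
{
	spinlock_t *ptl;
	pte_t *pte;

	/*
	 * If the PMD is none, pte_alloc_map_lock() allocates a PTE page
	 * table via __pte_alloc()/pmd_install(), then maps the PTE and
	 * takes its page table lock.
	 */
	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;

	if (pte_none(*pte))
		set_pte_at(mm, addr, pte, entry);

	pte_unmap_unlock(pte, ptl);
	return 0;
}

The arch files touched above presumably use pte_alloc_map()/pte_alloc_map_lock()
(e.g. in their huge_pte_alloc() or pgd setup paths), which is why each of them
now includes <linux/pte_ref.h> explicitly.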