@@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
+#include <linux/pte_ref.h>
struct mempolicy;
struct anon_vma;
@@ -2260,11 +2261,16 @@ static inline void pgtable_init(void)
static inline bool pgtable_pte_page_ctor(struct page *page)
{
- if (!ptlock_init(page))
+ if (!pte_ref_init(page))
return false;
+ if (!ptlock_init(page))
+ goto free_pte_ref;
__SetPageTable(page);
inc_lruvec_page_state(page, NR_PAGETABLE);
return true;
+free_pte_ref:
+ pte_ref_free(page);
+ return false;
}
static inline void pgtable_pte_page_dtor(struct page *page)
@@ -2272,6 +2278,7 @@ static inline void pgtable_pte_page_dtor(struct page *page)
ptlock_free(page);
__ClearPageTable(page);
dec_lruvec_page_state(page, NR_PAGETABLE);
+ pte_ref_free(page);
}
#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
@@ -153,6 +153,7 @@ struct page {
union {
struct mm_struct *pt_mm; /* x86 pgds only */
atomic_t pt_frag_refcount; /* powerpc */
+ struct percpu_ref *pte_ref; /* PTE page only */
};
#if ALLOC_SPLIT_PTLOCKS
spinlock_t *ptl;
new file mode 100644
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, ByteDance. All rights reserved.
+ *
+ * Author: Qi Zheng <zhengqi.arch@bytedance.com>
+ */
+
+#ifndef _LINUX_PTE_REF_H
+#define _LINUX_PTE_REF_H
+
+#ifdef CONFIG_FREE_USER_PTE
+
+bool pte_ref_init(pgtable_t pte);
+void pte_ref_free(pgtable_t pte);
+
+#else /* !CONFIG_FREE_USER_PTE */
+
+static inline bool pte_ref_init(pgtable_t pte)
+{
+	return true;
+}
+
+static inline void pte_ref_free(pgtable_t pte)
+{
+}
+
+#endif /* CONFIG_FREE_USER_PTE */
+
+#endif /* _LINUX_PTE_REF_H */
@@ -54,7 +54,9 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
 			   mm_init.o percpu.o slab_common.o \
 			   compaction.o vmacache.o \
 			   interval_tree.o list_lru.o workingset.o \
 			   debug.o gup.o mmap_lock.o $(mmu-y)
+
+obj-$(CONFIG_FREE_USER_PTE) += pte_ref.o
 
 # Give 'page_alloc' its own module-parameter namespace
 page-alloc-y := page_alloc.o
new file mode 100644
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022, ByteDance. All rights reserved.
+ *
+ * Author: Qi Zheng <zhengqi.arch@bytedance.com>
+ */
+#include <linux/pgtable.h>
+#include <linux/pte_ref.h>
+#include <linux/percpu-refcount.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_FREE_USER_PTE
+
+static void no_op(struct percpu_ref *r) {}
+
+bool pte_ref_init(pgtable_t pte)
+{
+	struct percpu_ref *pte_ref;
+
+	pte_ref = kmalloc(sizeof(*pte_ref), GFP_KERNEL);
+	if (!pte_ref)
+		return false;
+	if (percpu_ref_init(pte_ref, no_op,
+			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
+		goto free_ref;
+	/* We want to start with the refcount at zero */
+	percpu_ref_put(pte_ref);
+
+	pte->pte_ref = pte_ref;
+	return true;
+free_ref:
+	kfree(pte_ref);
+	return false;
+}
+
+void pte_ref_free(pgtable_t pte)
+{
+	struct percpu_ref *ref = pte->pte_ref;
+
+	if (!ref)
+		return;
+	pte->pte_ref = NULL;
+	percpu_ref_exit(ref);
+	kfree(ref);
+}
+
+#endif /* CONFIG_FREE_USER_PTE */
In order to pursue high performance, applications mostly use high-performance user-mode memory allocators, such as jemalloc or tcmalloc. These memory allocators use madvise(MADV_DONTNEED or MADV_FREE) to release physical memory, for the following reasons: First of all, we should hold the write lock of mmap_lock as briefly and rarely as possible, since the mmap_lock semaphore has long been a contention point in the memory management subsystem. mmap()/munmap() take the write lock, while madvise(MADV_DONTNEED or MADV_FREE) takes the read lock, so using madvise() instead of munmap() to release physical memory reduces contention on the mmap_lock. Secondly, after using madvise() to release physical memory, there is no need to build VMAs and allocate page tables again when the same virtual address is accessed again, which also saves some time. The following is the largest amount of user page table memory that can be allocated by a single user process on a 32-bit and a 64-bit system. +---------------------------+--------+---------+ | | 32-bit | 64-bit | +===========================+========+=========+ | user PTE page table pages | 3 MiB | 512 GiB | +---------------------------+--------+---------+ | user PMD page table pages | 3 KiB | 1 GiB | +---------------------------+--------+---------+ (For 32-bit, take a 3G user address space and 4K page size as an example; for 64-bit, take a 48-bit address width and 4K page size as an example.) After using madvise(), everything looks good, but as can be seen from the above table, a single process can create a large number of PTE page tables on a 64-bit system, since neither MADV_DONTNEED nor MADV_FREE releases page table memory. And before the process exits or calls munmap(), the kernel cannot reclaim these pages even if these PTE page tables do not map anything. To fix this situation, this patchset introduces a percpu_ref for each user PTE page table page. 
The following will hold a percpu_ref: Any !pte_none() entry, such as a regular page table entry that maps a physical page, or a swap entry, or a migrate entry, etc. Any visitor to the PTE page table entries, such as a page table walker. Any ``!pte_none()`` entry and any visitor can be regarded as a user of its PTE page table page. When the percpu_ref drops to 0 (we need to switch to atomic mode first to check this), it means that no one is using the PTE page table page, and this free PTE page table page can then be reclaimed. Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com> --- include/linux/mm.h | 9 +++++++- include/linux/mm_types.h | 1 + include/linux/pte_ref.h | 29 +++++++++++++++++++++++++ mm/Makefile | 2 +- mm/pte_ref.c | 47 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 86 insertions(+), 2 deletions(-) create mode 100644 include/linux/pte_ref.h create mode 100644 mm/pte_ref.c