@@ -233,7 +233,7 @@ config X86
select HAVE_PCI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
- select MMU_GATHER_RCU_TABLE_FREE if PARAVIRT
+ select MMU_GATHER_RCU_TABLE_FREE if PARAVIRT || FREE_USER_PTE
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
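For context (not part of the patch): MMU_GATHER_RCU_TABLE_FREE defers the
real freeing of page-table pages behind an RCU grace period, which is what
keeps IRQ-disabling lockless walkers such as GUP-fast safe once FREE_USER_PTE
starts dropping PTE pages at runtime, hence the new dependency. A minimal
sketch of that deferred-free pattern (hypothetical helper names, mirroring
the pte_free_rcu() callback this series already uses):

	static void pte_free_rcu_cb(struct rcu_head *head)
	{
		struct page *page = container_of(head, struct page, rcu_head);

		/* runs only once all pre-existing RCU readers are done */
		__free_page(page);
	}

	static void defer_pte_free(struct page *page)
	{
		call_rcu(&page->rcu_head, pte_free_rcu_cb);
	}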
@@ -18,7 +18,8 @@ int __pte_alloc_try_get(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_get(struct mm_struct *mm, pmd_t *pmd);
#ifdef CONFIG_FREE_USER_PTE
-void free_pte_table(struct mm_struct *mm, pmd_t *pmdp, unsigned long addr);
+void free_pte_table(struct mmu_gather *tlb, struct mm_struct *mm, pmd_t *pmdp,
+ unsigned long addr);
static inline void pte_ref_init(pgtable_t pte, pmd_t *pmd, int count)
{
@@ -62,7 +63,6 @@ static inline bool pte_get_unless_zero(pmd_t *pmdp)
{
pgtable_t pte = pmd_pgtable(*pmdp);
- VM_BUG_ON(!PageTable(pte));
return atomic_inc_not_zero(&pte->pte_refcount);
}
@@ -97,8 +97,8 @@ static inline bool pte_try_get(pmd_t *pmdp)
* i_mmap_lock or when parallel threads are excluded by other means
* which can make @pmdp entry stable.
*/
-static inline void pte_put_many(struct mm_struct *mm, pmd_t *pmdp,
- unsigned long addr, unsigned int nr)
+static inline void pte_put_many_tlb(struct mmu_gather *tlb, struct mm_struct *mm,
+ pmd_t *pmdp, unsigned long addr, unsigned int nr)
{
pgtable_t pte = pmd_pgtable(*pmdp);
@@ -106,7 +106,19 @@ static inline void pte_put_many(struct mm_struct *mm, pmd_t *pmdp,
VM_BUG_ON(pmd_devmap_trans_unstable(pmdp));
VM_BUG_ON(pte->pmd != pmdp);
if (atomic_sub_and_test(nr, &pte->pte_refcount))
- free_pte_table(mm, pmdp, addr & PMD_MASK);
+ free_pte_table(tlb, mm, pmdp, addr & PMD_MASK);
+}
+
+static inline void pte_put_tlb(struct mmu_gather *tlb, struct mm_struct *mm,
+ pmd_t *pmdp, unsigned long addr)
+{
+ pte_put_many_tlb(tlb, mm, pmdp, addr, 1);
+}
+
+static inline void pte_put_many(struct mm_struct *mm, pmd_t *pmdp,
+ unsigned long addr, unsigned int nr)
+{
+ pte_put_many_tlb(NULL, mm, pmdp, addr, nr);
}
static inline void pte_put(struct mm_struct *mm, pmd_t *pmdp, unsigned long addr)
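A hypothetical caller sketch (only the pte_ref API below is real) showing how
the variants divide the work: with a gather the final put hands the PTE page
to @tlb, while pte_put()/pte_put_many() keep the immediate-flush behaviour by
passing a NULL gather through to free_pte_table():

	static void example_zap_pte_page(struct mmu_gather *tlb, struct mm_struct *mm,
					 pmd_t *pmd, unsigned long addr)
	{
		if (!pte_try_get(pmd))
			return;
		/* ... clear the PTE entries under the page table lock ... */
		if (tlb)
			pte_put_tlb(tlb, mm, pmd, addr);	/* batched into @tlb */
		else
			pte_put(mm, pmd, addr);			/* immediate flush */
	}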
@@ -180,6 +192,16 @@ static inline bool pte_try_get(pmd_t *pmdp)
return true;
}
+static inline void pte_put_many_tlb(struct mmu_gather *tlb, struct mm_struct *mm,
+ pmd_t *pmdp, unsigned long addr, unsigned int nr)
+{
+}
+
+static inline void pte_put_tlb(struct mmu_gather *tlb, struct mm_struct *mm,
+ pmd_t *pmdp, unsigned long addr)
+{
+}
+
static inline void pte_put_many(struct mm_struct *mm, pmd_t *pmdp,
unsigned long addr, unsigned int value)
{
@@ -477,7 +477,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(orig_pte, ptl);
- pte_put(vma->vm_mm, pmd, start);
+ pte_put_tlb(tlb, vma->vm_mm, pmd, start);
if (pageout)
reclaim_pages(&page_list);
cond_resched();
@@ -709,7 +709,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(orig_pte, ptl);
if (nr_put)
- pte_put_many(mm, pmd, start, nr_put);
+ pte_put_many_tlb(tlb, mm, pmd, start, nr_put);
cond_resched();
next:
return 0;
@@ -1438,7 +1438,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
}
if (nr_put)
- pte_put_many(mm, pmd, start, nr_put);
+ pte_put_many_tlb(tlb, mm, pmd, start, nr_put);
return addr;
}
@@ -1485,7 +1485,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
goto next;
next = zap_pte_range(tlb, vma, pmd, addr, next, details);
- pte_put(tlb->mm, pmd, addr);
+ pte_put_tlb(tlb, tlb->mm, pmd, addr);
next:
cond_resched();
} while (pmd++, addr = next, addr != end);
@@ -134,42 +134,42 @@ static void __tlb_remove_table_free(struct mmu_table_batch *batch)
*
*/
-static void tlb_remove_table_smp_sync(void *arg)
+static void tlb_remove_table_rcu(struct rcu_head *head)
{
- /* Simply deliver the interrupt */
+ __tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}
-static void tlb_remove_table_sync_one(void)
+static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
- /*
- * This isn't an RCU grace period and hence the page-tables cannot be
- * assumed to be actually RCU-freed.
- *
- * It is however sufficient for software page-table walkers that rely on
- * IRQ disabling.
- */
- smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+ call_rcu(&batch->rcu, tlb_remove_table_rcu);
}
-static void tlb_remove_table_rcu(struct rcu_head *head)
+static void tlb_remove_table_one_rcu(struct rcu_head *head)
{
- __tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
+ struct page *page = container_of(head, struct page, rcu_head);
+
+ __tlb_remove_table(page);
}
-static void tlb_remove_table_free(struct mmu_table_batch *batch)
+static void tlb_remove_table_one(void *table)
{
- call_rcu(&batch->rcu, tlb_remove_table_rcu);
+ pgtable_t page = (pgtable_t)table;
+
+ call_rcu(&page->rcu_head, tlb_remove_table_one_rcu);
}
#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */
-static void tlb_remove_table_sync_one(void) { }
-
static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
__tlb_remove_table_free(batch);
}
+static void tlb_remove_table_one(void *table)
+{
+ __tlb_remove_table(table);
+}
+
#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
/*
@@ -187,12 +187,6 @@ static inline void tlb_table_invalidate(struct mmu_gather *tlb)
}
}
-static void tlb_remove_table_one(void *table)
-{
- tlb_remove_table_sync_one();
- __tlb_remove_table(table);
-}
-
static void tlb_table_flush(struct mmu_gather *tlb)
{
struct mmu_table_batch **batch = &tlb->batch;
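Worth noting: tlb_remove_table_one() is now defined per-config above, so the
allocation-failure fallback in tlb_remove_table() (unchanged code, quoted
abridged from the surrounding file as I recall it) automatically picks the
RCU-deferred variant instead of the old IPI broadcast:

	void tlb_remove_table(struct mmu_gather *tlb, void *table)
	{
		struct mmu_table_batch **batch = &tlb->batch;

		if (*batch == NULL) {
			*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
			if (*batch == NULL) {
				tlb_table_invalidate(tlb);
				tlb_remove_table_one(table);	/* now RCU-deferred */
				return;
			}
			(*batch)->nr = 0;
		}

		(*batch)->tables[(*batch)->nr++] = table;
		if ((*batch)->nr == MAX_TABLE_BATCH)
			tlb_table_flush(tlb);
	}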
@@ -10,6 +10,7 @@
#include <linux/pte_ref.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
+#include <asm/tlb.h>
#ifdef CONFIG_DEBUG_VM
static void pte_free_debug(pmd_t pmd)
@@ -34,7 +35,8 @@ static void pte_free_rcu(struct rcu_head *rcu)
__free_page(page);
}
-void free_pte_table(struct mm_struct *mm, pmd_t *pmdp, unsigned long addr)
+void free_pte_table(struct mmu_gather *tlb, struct mm_struct *mm,
+ pmd_t *pmdp, unsigned long addr)
{
struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
spinlock_t *ptl;
@@ -45,9 +47,13 @@ void free_pte_table(struct mm_struct *mm, pmd_t *pmdp, unsigned long addr)
spin_unlock(ptl);
pte_free_debug(pmd);
- flush_tlb_range(&vma, addr, addr + PMD_SIZE);
+ if (!tlb) {
+ flush_tlb_range(&vma, addr, addr + PMD_SIZE);
+ call_rcu(&pmd_pgtable(pmd)->rcu_head, pte_free_rcu);
+ } else {
+ pte_free_tlb(tlb, pmd_pgtable(pmd), addr);
+ }
mm_dec_nr_ptes(mm);
- call_rcu(&pmd_pgtable(pmd)->rcu_head, pte_free_rcu);
}
static inline void __pte_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
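Spelled out, the two branches in free_pte_table() above give callers this
split (illustrative call sites, not from the patch):

	/* unmap/reclaim paths that own a gather: flush and free are batched */
	pte_put_tlb(tlb, mm, pmd, addr);	/* -> pte_free_tlb() */

	/* paths without a gather keep the old behaviour */
	pte_put(mm, pmd, addr);			/* -> flush_tlb_range() + call_rcu() */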
In unmap_region() and other paths, we can reuse @tlb to free PTE page
tables, which reduces the number of TLB flushes.

Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
---
 arch/x86/Kconfig        |  2 +-
 include/linux/pte_ref.h | 32 +++++++++++++++++++++++++++-----
 mm/madvise.c            |  4 ++--
 mm/memory.c             |  4 ++--
 mm/mmu_gather.c         | 40 +++++++++++++++++-----------------------
 mm/pte_ref.c            | 12 +++++++++---
 6 files changed, 58 insertions(+), 36 deletions(-)
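To see where the saving comes from, here is the unmap_region() flow, abridged
from mm/mmap.c as I recall it: every zap path that already owns an mmu_gather
now folds the PTE-page flushes into the single flush that tlb_finish_mmu()
performs anyway, instead of issuing one flush_tlb_range() per emptied PMD:

	tlb_gather_mmu(&tlb, mm);
	unmap_vmas(&tlb, vma, start, end);	  /* pte_put_many_tlb() feeds @tlb */
	free_pgtables(&tlb, vma, floor, ceiling);
	tlb_finish_mmu(&tlb);			  /* one flush for the whole range */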