[RFC,17/20] mm/tlb: update completed deferred TLB flush conditionally

Message ID 20210131001132.3368247-18-namit@vmware.com
Series TLB batching consolidation and enhancements

Commit Message

Nadav Amit Jan. 31, 2021, 12:11 a.m. UTC
From: Nadav Amit <namit@vmware.com>

If all the deferred TLB flushes were already completed, there is no need
to update the completed TLB flush generation. This update requires an
atomic cmpxchg, so we would like to skip it.

To do so, save for each mm the last TLB generation in which a TLB flush
was deferred. While saving this information requires another atomic
cmpxchg, assume that deferred TLB flushes are less frequent than TLB
flushes, so the extra cmpxchg is taken on the rarer path.
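
For illustration, below is a minimal userspace sketch of the scheme,
with C11 atomics standing in for the kernel's atomic64_t. The mm_model
struct, the helper bodies and main() are hypothetical stand-ins for the
series' inc_mm_tlb_gen() and tlb_update_generation(), not kernel code:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct mm_model {
	_Atomic uint64_t tlb_gen;		/* latest TLB generation */
	_Atomic uint64_t tlb_gen_deferred;	/* last gen with a deferred flush */
	_Atomic uint64_t tlb_gen_completed;	/* last gen known to be flushed */
};

/* Monotonically advance *gen to new_gen, racing with other updaters. */
static void tlb_update_generation(_Atomic uint64_t *gen, uint64_t new_gen)
{
	uint64_t cur = atomic_load(gen);

	/* A failed cmpxchg reloads cur; stop once *gen >= new_gen. */
	while (cur < new_gen &&
	       !atomic_compare_exchange_weak(gen, &cur, new_gen))
		;
}

static uint64_t inc_mm_tlb_gen(struct mm_model *mm)
{
	/* fetch_add returns the old value; return the new generation. */
	return atomic_fetch_add(&mm->tlb_gen, 1) + 1;
}

static void defer_flush(struct mm_model *mm)
{
	/* Deferring a flush records the generation it belongs to. */
	tlb_update_generation(&mm->tlb_gen_deferred, inc_mm_tlb_gen(mm));
}

static void mark_mm_tlb_gen_done(struct mm_model *mm, uint64_t gen)
{
	/*
	 * Fast path: nothing was deferred since the last completion,
	 * so skip the cmpxchg loop on tlb_gen_completed entirely.
	 */
	if (atomic_load(&mm->tlb_gen_deferred) ==
	    atomic_load(&mm->tlb_gen_completed))
		return;

	tlb_update_generation(&mm->tlb_gen_completed, gen);
}

int main(void)
{
	struct mm_model mm = { 0 };

	defer_flush(&mm);		/* tlb_gen_deferred = 1 */
	mark_mm_tlb_gen_done(&mm, 1);	/* slow path: cmpxchg runs */
	mark_mm_tlb_gen_done(&mm, 1);	/* fast path: skipped */

	printf("completed=%llu\n",
	       (unsigned long long)atomic_load(&mm.tlb_gen_completed));
	return 0;
}

Per the reasoning above, the deferral path pays one extra cmpxchg so
that the more frequent completion path can often avoid one.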

Signed-off-by: Nadav Amit <namit@vmware.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yu Zhao <yuzhao@google.com>
Cc: x86@kernel.org
---
 include/asm-generic/tlb.h | 23 ++++++++++++++++++-----
 include/linux/mm_types.h  |  5 +++++
 2 files changed, 23 insertions(+), 5 deletions(-)

Patch

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 74dbb56d816d..a41af03fbede 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -536,6 +536,14 @@ static inline void tlb_update_generation(atomic64_t *gen, u64 new_gen)
 
 static inline void mark_mm_tlb_gen_done(struct mm_struct *mm, u64 gen)
 {
+	/*
+	 * If all the deferred TLB generations were completed, we can skip
+	 * the update of tlb_gen_completed and save a few cycles on cmpxchg.
+	 */
+	if (atomic64_read(&mm->tlb_gen_deferred) ==
+	    atomic64_read(&mm->tlb_gen_completed))
+		return;
+
 	/*
 	 * Update the completed generation to the new generation if the new
 	 * generation is greater than the previous one.
@@ -546,7 +554,7 @@ static inline void mark_mm_tlb_gen_done(struct mm_struct *mm, u64 gen)
 static inline void read_defer_tlb_flush_gen(struct mmu_gather *tlb)
 {
 	struct mm_struct *mm = tlb->mm;
-	u64 mm_gen;
+	u64 mm_gen, new_gen;
 
 	/*
 	 * Any change of PTE before calling __track_deferred_tlb_flush() must be
@@ -567,11 +575,16 @@ static inline void read_defer_tlb_flush_gen(struct mmu_gather *tlb)
 	 * correctness issues, and should not induce overheads, since anyhow in
 	 * TLB storms it is better to perform full TLB flush.
 	 */
-	if (mm_gen != tlb->defer_gen) {
-		VM_BUG_ON(mm_gen < tlb->defer_gen);
+	if (mm_gen == tlb->defer_gen)
+		return;
 
-		tlb->defer_gen = inc_mm_tlb_gen(mm);
-	}
+	VM_BUG_ON(mm_gen < tlb->defer_gen);
+
+	new_gen = inc_mm_tlb_gen(mm);
+	tlb->defer_gen = new_gen;
+
+	/* Update mm->tlb_gen_deferred */
+	tlb_update_generation(&mm->tlb_gen_deferred, new_gen);
 }
 
 #ifndef CONFIG_PER_TABLE_DEFERRED_FLUSHES
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cae9e8bbf8e6..4122a9b8b56f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -578,6 +578,11 @@ struct mm_struct {
 		 */
 		atomic64_t tlb_gen;
 
+		/*
+		 * The last TLB generation in which a TLB flush was deferred.
+		 */
+		atomic64_t tlb_gen_deferred;
+
 		/*
 		 * TLB generation which is guaranteed to be flushed, including
 		 * all the PTE changes that were performed before tlb_gen was