@@ -63,5 +63,9 @@ typedef struct {
.lock = __MUTEX_INITIALIZER(mm.context.lock), \
}
+/* On x86, mm_cpumask(mm) contains all CPUs that might be lazily using mm */
+#define for_each_possible_lazymm_cpu(cpu, mm) \
+ for_each_cpu((cpu), mm_cpumask((mm)))
+
#endif /* _ASM_X86_MMU_H */
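
For illustration only (not part of this patch), here is a sketch of how a
caller might use the new iterator; example_walk_lazy_users() and
example_ipi_cb() are hypothetical names chosen for this sketch:

#include <linux/mm_types.h>
#include <linux/smp.h>

/* Hypothetical IPI callback, run on a CPU that might be lazily using @info. */
static void example_ipi_cb(void *info)
{
	/* e.g. confirm the CPU has moved off the mm, or force it off */
}

/*
 * Hypothetical caller: visit every CPU that might still be lazily using
 * @mm. With the x86 definition above, the walk is bounded by
 * mm_cpumask(mm) rather than by every CPU in the system.
 */
static void example_walk_lazy_users(struct mm_struct *mm)
{
	int cpu;

	for_each_possible_lazymm_cpu(cpu, mm)
		smp_call_function_single(cpu, example_ipi_cb, mm, 1);
}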
@@ -706,7 +706,9 @@ temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
/*
* Make sure not to be in TLB lazy mode, as otherwise we'll end up
* with a stale address space WITHOUT being in lazy mode after
- * restoring the previous mm.
+ * restoring the previous mm. Additionally, once we switch mms,
+ * for_each_possible_lazymm_cpu() will no longer report this CPU,
+ * so a lazymm pin wouldn't work.
*/
if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
unlazy_mm_irqs_off();
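
For reference, a sketch of the calling pattern (modeled loosely on the x86
text_poke() path; example_use_temp_mm() is a hypothetical name, not code in
this patch): temporary mms are entered with IRQs off, and use_temporary_mm()
drops lazy TLB state just before switching:

static void example_use_temp_mm(struct mm_struct *temp_mm)
{
	unsigned long flags;
	temp_mm_state_t prev;

	/* Temporary mms must be switched to with IRQs disabled. */
	local_irq_save(flags);
	prev = use_temporary_mm(temp_mm);	/* unlazies first, then switches */
	/* ... access memory through the temporary mapping ... */
	unuse_temporary_mm(prev);
	local_irq_restore(flags);
}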
Now that x86 does not switch away from a lazy mm behind the scheduler's
back and thus clear a CPU from mm_cpumask() that the scheduler thinks is
lazy, x86 can use mm_cpumask() to optimize for_each_possible_lazymm_cpu().

Signed-off-by: Andy Lutomirski <luto@kernel.org>
---
 arch/x86/include/asm/mmu.h | 4 ++++
 arch/x86/mm/tlb.c          | 4 +++-
 2 files changed, 7 insertions(+), 1 deletion(-)
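
For comparison, a sketch of the assumed arch-independent fallback (the actual
definition lives in an earlier patch of this series and is not reproduced
here); the x86 override above narrows this walk from all online CPUs down to
mm_cpumask(mm):

#ifndef for_each_possible_lazymm_cpu
/* Assumed generic fallback: any online CPU might be lazily using @mm. */
#define for_each_possible_lazymm_cpu(cpu, mm)	for_each_online_cpu(cpu)
#endif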