@@ -240,6 +240,18 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
 }
 
+static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
+{
+	bool should_defer = false;
+
+	/* If remote CPUs need to be flushed then defer batch the flush */
+	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
+		should_defer = true;
+	put_cpu();
+
+	return should_defer;
+}
+
 static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 {
 	/*
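The hunk above lifts the deferral check into a per-arch helper without changing its logic: a TLB flush is worth batching only when the mm is live on some CPU other than the current one, since that is exactly the case where remote flushes (IPIs on x86) would be required; the get_cpu()/put_cpu() pair disables preemption around the test so "the current CPU" cannot change mid-check, and cpumask_any_but() returns a value >= nr_cpu_ids when no other CPU qualifies. What follows is a minimal userspace model of that predicate, with a toy bitmask standing in for the kernel's cpumask API (all toy_* and model_* names are hypothetical, for illustration only):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Toy stand-in for the kernel's cpumask: one bit per CPU. */
typedef unsigned int toy_cpumask;

/*
 * Toy version of cpumask_any_but(): return the index of any set bit
 * other than @skip, or NR_CPUS if no such bit exists (mirroring the
 * kernel convention of returning >= nr_cpu_ids on failure).
 */
static unsigned int toy_cpumask_any_but(toy_cpumask mask, unsigned int skip)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu != skip && (mask & (1u << cpu)))
			return cpu;
	return NR_CPUS;
}

/*
 * Model of arch_tlbbatch_should_defer(): defer (batch) the flush only
 * when some CPU other than the current one may hold stale TLB entries
 * for this mm, i.e. when a remote flush would be needed anyway.
 */
static bool model_should_defer(toy_cpumask mm_mask, unsigned int this_cpu)
{
	return toy_cpumask_any_but(mm_mask, this_cpu) < NR_CPUS;
}

int main(void)
{
	/* mm has run on CPUs 0 and 3; the check runs on CPU 0. */
	printf("%d\n", model_should_defer(0x9, 0));	/* 1: CPU 3 is remote */

	/* mm has run only on CPU 0, the current CPU. */
	printf("%d\n", model_should_defer(0x1, 0));	/* 0: local flush suffices */
	return 0;
}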
@@ -686,17 +686,10 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
  */
 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 {
-	bool should_defer = false;
-
	if (!(flags & TTU_BATCH_FLUSH))
 		return false;
 
-	/* If remote CPUs need to be flushed then defer batch the flush */
-	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
-		should_defer = true;
-	put_cpu();
-
-	return should_defer;
+	return arch_tlbbatch_should_defer(mm);
 }
 
 /*
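With the check behind arch_tlbbatch_should_defer(), mm/rmap.c no longer hard-codes the x86 IPI-based policy, and other architectures can supply their own answer without touching the generic reclaim path. As a hedged sketch only, an architecture whose hardware broadcasts TLB invalidations to all cores might not need the remote-CPU test at all; note that arch_has_tlb_broadcast() below is a hypothetical probe invented for illustration, not an API from this patch:

/*
 * Hypothetical implementation for an architecture with hardware-broadcast
 * TLB invalidation (sketch only; arch_has_tlb_broadcast() is invented
 * here for illustration and is not part of this patch).
 */
static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	/*
	 * No IPIs are needed to reach remote CPUs, so batching the
	 * flushes saves per-page invalidation cost whenever the
	 * broadcast feature is present.
	 */
	return arch_has_tlb_broadcast();
}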