@@ -251,7 +251,9 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
- struct mm_struct *mm)
+ struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long uaddr)
{
inc_mm_tlb_gen(mm);
cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}
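For context (not part of the patch): the x86 arch_tlbbatch_add_mm() above can ignore the new vma and uaddr arguments, since x86 defers a full flush to the CPUs in the batch cpumask. An architecture that instead issues by-address invalidations at unmap time could consume uaddr directly. A minimal sketch, assuming an arm64-style per-page helper; the __flush_tlb_page_nosync() name is illustrative, not something this hunk defines:

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long uaddr)
{
	/*
	 * Start a per-page TLB invalidation for uaddr now; the final
	 * synchronization happens when the whole batch is flushed.
	 */
	__flush_tlb_page_nosync(mm, uaddr);	/* hypothetical helper */
}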
@@ -642,12 +642,14 @@ void try_to_unmap_flush_dirty(void)
#define TLB_FLUSH_BATCH_PENDING_LARGE \
(TLB_FLUSH_BATCH_PENDING_MASK / 2)

-static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
+ struct vm_area_struct *vma,
+ unsigned long uaddr)
{
struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
int batch, nbatch;

- arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
+ arch_tlbbatch_add_mm(&tlb_ubc->arch, mm, vma, uaddr);
tlb_ubc->flush_required = true;

/*
@@ -737,7 +739,9 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
}
}

#else
-static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
+ struct vm_area_struct *vma,
+ unsigned long uaddr)
{
}
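Assembled, the two definitions sit on either side of the existing config guard in mm/rmap.c, so the call site below compiles whether or not the architecture selects batched unmap flushing. Condensed from the hunks above; the #ifdef lines fall outside the visible diff context and are shown here only for orientation:

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
				      struct vm_area_struct *vma,
				      unsigned long uaddr)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm, vma, uaddr);
	tlb_ubc->flush_required = true;
	/* ... pending-batch accounting elided ... */
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
				      struct vm_area_struct *vma,
				      unsigned long uaddr)
{
	/* Batching not supported: the new arguments are simply ignored. */
}
#endif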
@@ -1600,7 +1604,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
*/
pteval = ptep_get_and_clear(mm, address, pvmw.pte);

- set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
+ set_tlb_ubc_flush_pending(mm, pte_dirty(pteval), vma, address);
} else {
pteval = ptep_clear_flush(vma, address, pvmw.pte);
}
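The deferred path above only records the flush; the batch is drained later by try_to_unmap_flush(), which this patch leaves unchanged. Lightly condensed from mm/rmap.c, it hands the accumulated state back to the architecture:

void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	/* Perform (or complete) the invalidations recorded in the batch. */
	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

On x86, arch_tlbbatch_flush() still flushes by cpumask; an architecture consuming the new uaddr at batch time, as sketched earlier, would mainly need a completion barrier here.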