@@ -252,6 +252,13 @@ extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
struct mmu_gather {
struct mm_struct *mm;
+ /*
+ * The current vma. It is set by tlb_start_vma() and is therefore
+ * only valid between tlb_start_vma() and tlb_end_vma()
+ * calls.
+ */
+ struct vm_area_struct *vma;
+
#ifdef CONFIG_MMU_GATHER_TABLE_FREE
struct mmu_table_batch *batch;
#endif
@@ -283,12 +290,6 @@ struct mmu_gather {
unsigned int cleared_puds : 1;
unsigned int cleared_p4ds : 1;
- /*
- * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
- */
- unsigned int vma_exec : 1;
- unsigned int vma_huge : 1;
-
unsigned int batch_count;
#ifndef CONFIG_MMU_GATHER_NO_GATHER
@@ -352,10 +353,6 @@ static inline void tlb_flush(struct mmu_gather *tlb)
flush_tlb_mm(tlb->mm);
}
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#else /* CONFIG_MMU_GATHER_NO_RANGE */
@@ -364,7 +361,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
/*
* When an architecture does not provide its own tlb_flush() implementation
- * but does have a reasonably efficient flush_vma_range() implementation
+ * but does have a reasonably efficient flush_tlb_range() implementation
* use that.
*/
static inline void tlb_flush(struct mmu_gather *tlb)
@@ -372,38 +369,20 @@ static inline void tlb_flush(struct mmu_gather *tlb)
if (tlb->fullmm || tlb->need_flush_all) {
flush_tlb_mm(tlb->mm);
} else if (tlb->end) {
- struct vm_area_struct vma = {
- .vm_mm = tlb->mm,
- .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
- (tlb->vma_huge ? VM_HUGETLB : 0),
- };
-
- flush_tlb_range(&vma, tlb->start, tlb->end);
+ VM_BUG_ON(!tlb->vma);
+ flush_tlb_range(tlb->vma, tlb->start, tlb->end);
}
}
static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
+tlb_update_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
- /*
- * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
- * mips-4k) flush only large pages.
- *
- * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
- * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
- * range.
- *
- * We rely on tlb_end_vma() to issue a flush, such that when we reset
- * these values the batch is empty.
- */
- tlb->vma_huge = is_vm_hugetlb_page(vma);
- tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+ tlb->vma = vma;
}
-
#else
static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+tlb_update_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#endif
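
The comment removed above is why flush_tlb_range() wants a vma in the first place: some implementations consult vma->vm_flags, flushing only huge-page entries for VM_HUGETLB mappings or additionally flushing the I-TLB for VM_EXEC mappings, which is why the old code faked up an on-stack vma carrying just those two bits. Below is a minimal userspace sketch, not kernel code, illustrating that dependency; the types and flag values are simplified stand-ins for the real kernel definitions.

#include <stdio.h>

#define VM_EXEC    0x1UL
#define VM_HUGETLB 0x2UL

struct vm_area_struct {
	unsigned long vm_flags;
};

/* A flags-sensitive range flush, like the tile/mips-4k cases cited above. */
static void flush_tlb_range(struct vm_area_struct *vma,
			    unsigned long start, unsigned long end)
{
	if (vma->vm_flags & VM_HUGETLB)
		printf("flush huge-page TLB entries %#lx-%#lx\n", start, end);
	else
		printf("flush TLB entries %#lx-%#lx\n", start, end);

	if (vma->vm_flags & VM_EXEC)
		printf("also flush I-TLB for %#lx-%#lx\n", start, end);
}

int main(void)
{
	/* What the removed code did: a throwaway on-stack vma carrying
	 * only the two flags the flush cares about. */
	struct vm_area_struct fake = { .vm_flags = VM_EXEC };
	flush_tlb_range(&fake, 0x1000, 0x2000);

	/* What the patch does instead: hand over the real vma tracked in
	 * struct mmu_gather, so no per-flag caching is needed. */
	struct vm_area_struct real = { .vm_flags = VM_EXEC | VM_HUGETLB };
	flush_tlb_range(&real, 0x3000, 0x5000);

	return 0;
}

With the patch, the real vma cached in struct mmu_gather is passed to flush_tlb_range() directly, so the vma_exec/vma_huge bits become redundant.
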
@@ -487,17 +466,17 @@ static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *
if (tlb->fullmm)
return;
- tlb_update_vma_flags(tlb, vma);
+ tlb_update_vma(tlb, vma);
flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (tlb->fullmm)
- return;
+ goto out;
if (IS_ENABLED(CONFIG_ARCH_WANT_AGGRESSIVE_TLB_FLUSH_BATCHING))
- return;
+ goto out;
/*
* Do a TLB flush and reset the range at VMA boundaries; this avoids
@@ -506,6 +485,9 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
* this.
*/
tlb_flush_mmu_tlbonly(tlb);
+out:
+ /* Clear the cached vma so a stale pointer is never used. */
+ tlb_update_vma(tlb, NULL);
}
#ifdef CONFIG_ARCH_HAS_TLB_GENERATIONS
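
For reference, here is a minimal userspace sketch, not the kernel implementation, of the tlb->vma lifetime the hunks above establish: the pointer is set by tlb_start_vma(), consumed by tlb_flush() (which now asserts it is non-NULL), and cleared again on the way out of tlb_end_vma(). The names mirror the kernel helpers, but the bodies and signatures are simplified stand-ins.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct vm_area_struct {
	unsigned long vm_start, vm_end;
};

struct mmu_gather {
	struct vm_area_struct *vma;	/* valid only inside a vma walk */
	unsigned long start, end;
};

static void tlb_flush(struct mmu_gather *tlb)
{
	assert(tlb->vma);		/* stands in for VM_BUG_ON(!tlb->vma) */
	printf("flush %#lx-%#lx using vma %#lx-%#lx\n",
	       tlb->start, tlb->end, tlb->vma->vm_start, tlb->vma->vm_end);
}

static void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	tlb->vma = vma;			/* tlb_update_vma(tlb, vma) */
}

static void tlb_end_vma(struct mmu_gather *tlb)
{
	tlb_flush(tlb);			/* flush at the vma boundary */
	tlb->vma = NULL;		/* tlb_update_vma(tlb, NULL) */
}

int main(void)
{
	struct vm_area_struct vma = { .vm_start = 0x1000, .vm_end = 0x9000 };
	struct mmu_gather tlb = { .start = 0x2000, .end = 0x3000 };

	tlb_start_vma(&tlb, &vma);
	/* ... pages unmapped here would extend tlb.start/tlb.end ... */
	tlb_end_vma(&tlb);		/* tlb.vma is NULL again here */

	return 0;
}

The assert() stands in for the VM_BUG_ON(!tlb->vma) added to tlb_flush(); because tlb_end_vma() clears the pointer, a range flush issued outside a vma walk trips it immediately rather than flushing with a stale vma.
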