Message ID | 20230911131224.61924-2-alexghiti@rivosinc.com (mailing list archive)
---|---
State | Superseded
Series | riscv: tlb flush improvements
On Mon, Sep 11, 2023 at 2:13 PM Alexandre Ghiti <alexghiti@rivosinc.com> wrote:
>
> For now, flush_tlb() simply calls flush_tlb_mm() which results in a
> flush of the whole TLB. So let's use mmu_gather fields to provide a more
> fine-grained flush of the TLB.
>
> Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
> Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
> ---
>  arch/riscv/include/asm/tlb.h      | 8 +++++++-
>  arch/riscv/include/asm/tlbflush.h | 3 +++
>  arch/riscv/mm/tlbflush.c          | 7 +++++++
>  3 files changed, 17 insertions(+), 1 deletion(-)

Tested-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com> # On RZ/Five SMARC

Cheers,
Prabhakar
On 2023-09-11 8:12 AM, Alexandre Ghiti wrote:
> For now, flush_tlb() simply calls flush_tlb_mm() which results in a

s/flush_tlb/tlb_flush/ here and in the subject.

Otherwise:
Reviewed-by: Samuel Holland <samuel.holland@sifive.com>

> flush of the whole TLB. So let's use mmu_gather fields to provide a more
> fine-grained flush of the TLB.
>
> Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
> Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Hi Samuel,

On Mon, Oct 9, 2023 at 7:53 PM Samuel Holland <samuel.holland@sifive.com> wrote:
>
> On 2023-09-11 8:12 AM, Alexandre Ghiti wrote:
> > For now, flush_tlb() simply calls flush_tlb_mm() which results in a
>
> s/flush_tlb/tlb_flush/ here and in the subject.
>
> Otherwise:
> Reviewed-by: Samuel Holland <samuel.holland@sifive.com>

Ahah good catch, thanks for that and the RB!

Alex
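A note before the patch itself: tlb_get_unmap_size(), which the new tlb_flush() passes as the flush stride, is not added by this series; it comes from the generic mmu_gather layer. The sketch below is a rough paraphrase of the helpers in include/asm-generic/tlb.h around this kernel version; treat the field names and exact checks as approximate rather than authoritative.

/*
 * Rough paraphrase of the generic mmu_gather helpers (approximate, not
 * authoritative): the cleared_* bits record which page-table levels the
 * unmap touched, so the flush can use the matching stride instead of
 * assuming base-size pages.
 */
static inline unsigned int tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;	/* base pages */
	if (tlb->cleared_pmds)
		return PMD_SHIFT;	/* e.g. 2M huge pages */
	if (tlb->cleared_puds)
		return PUD_SHIFT;	/* e.g. 1G huge pages */
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;
	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}

This is what lets the new tlb_flush() hand flush_tlb_mm_range() a 2M or 1G stride when tearing down hugepage mappings, instead of always walking the range in 4K steps.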
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 120bcf2ed8a8..1eb5682b2af6 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	flush_tlb_mm(tlb->mm);
+#ifdef CONFIG_MMU
+	if (tlb->fullmm || tlb->need_flush_all)
+		flush_tlb_mm(tlb->mm);
+	else
+		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
+				   tlb_get_unmap_size(tlb));
+#endif
 }
 
 #endif /* _ASM_RISCV_TLB_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index a09196f8de68..f5c4fb0ae642 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -32,6 +32,8 @@ static inline void local_flush_tlb_page(unsigned long addr)
 #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+			unsigned long end, unsigned int page_size);
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end);
@@ -52,6 +54,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 }
 
 #define flush_tlb_mm(mm)		flush_tlb_all()
+#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
 #endif /* !CONFIG_SMP || !CONFIG_MMU */
 
 /* Flush a range of kernel pages */
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 77be59aadc73..fa03289853d8 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -132,6 +132,13 @@ void flush_tlb_mm(struct mm_struct *mm)
 	__flush_tlb_range(mm, 0, -1, PAGE_SIZE);
 }
 
+void flush_tlb_mm_range(struct mm_struct *mm,
+			unsigned long start, unsigned long end,
+			unsigned int page_size)
+{
+	__flush_tlb_range(mm, start, end - start, page_size);
+}
+
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
 	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
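To see why the stride matters, consider how a local range flush is typically lowered to RISC-V's sfence.vma instruction. The sketch below is illustrative only and is not the kernel's actual __flush_tlb_range(): the function name and the FLUSH_RANGE_THRESHOLD cutoff are hypothetical, and the real code additionally handles ASIDs and cross-CPU shootdowns.

/*
 * Illustrative sketch, NOT the kernel's implementation. The name and
 * FLUSH_RANGE_THRESHOLD are hypothetical; the real code also deals
 * with ASIDs and remote CPUs.
 */
#define FLUSH_RANGE_THRESHOLD	64	/* hypothetical cutoff */

static void local_flush_tlb_range_sketch(unsigned long start,
					 unsigned long size,
					 unsigned long stride)
{
	unsigned long addr;

	/* Full flush requested (flush_tlb_mm() passes size = -1), or
	 * too many entries to flush one by one: nuke the whole TLB. */
	if (size == (unsigned long)-1 ||
	    size / stride > FLUSH_RANGE_THRESHOLD) {
		__asm__ __volatile__ ("sfence.vma" : : : "memory");
		return;
	}

	/* One sfence.vma per stride-sized step: with a 2M stride, a 2M
	 * region is one instruction instead of 512 with a 4K stride. */
	for (addr = start; addr < start + size; addr += stride)
		__asm__ __volatile__ ("sfence.vma %0"
				      : : "r" (addr) : "memory");
}

Before this patch, every unmap path ended in flush_tlb_mm(), i.e. the full-flush branch above; with the range variant, unmapping a single 2M huge page can come down to a single sfence.vma.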