| Message ID | 1622970249-50770-4-git-send-email-guoren@kernel.org (mailing list archive) |
|---|---|
| State | New, archived |
| Series | riscv: Add DMA_COHERENT support for Allwinner D1 |
On Sun, Jun 06, 2021 at 09:03:58AM +0000, guoren@kernel.org wrote:
> +static inline void local_flush_tlb_all_asid(unsigned long asid)
> +{
> +	__asm__ __volatile__ ("sfence.vma x0, %0"
> +			:
> +			: "r" (asid)
> +			: "memory");
> +}
> +
> +static inline void local_flush_tlb_range_asid(unsigned long start,
> +		unsigned long size, unsigned long asid)
> +{
> +	unsigned long tmp, end = ALIGN(start + size, PAGE_SIZE);
> +
> +	for (tmp = start & PAGE_MASK; tmp < end; tmp += PAGE_SIZE) {
> +		__asm__ __volatile__ ("sfence.vma %0, %1"
> +			:
> +			: "r" (tmp), "r" (asid)
> +			: "memory");
> +	}

No need to expose these in a header.

> +static void __sbi_tlb_flush_range_asid(struct cpumask *cmask,
> +				unsigned long start,
> +				unsigned long size,
> +				unsigned long asid)
> +{
> +	struct cpumask hmask;
> +	unsigned int cpuid;
> +
> +	if (cpumask_empty(cmask))
> +		return;
> +
> +	cpuid = get_cpu();
> +
> +	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
> +		if (size == -1)
> +			local_flush_tlb_all_asid(asid);
> +		else
> +			local_flush_tlb_range_asid(start, size, asid);
> +	} else {
> +		riscv_cpuid_to_hartid_mask(cmask, &hmask);
> +		sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),
> +				start, size, asid);
> +	}
> +
> +	put_cpu();
> +}

Still no need to duplicate most of this logic.  Also please document why
this uses a different tradeoff for the flush all logic compared to the
non-ASID path.

> +
>  void flush_tlb_mm(struct mm_struct *mm)
>  {
> -	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
> +	if (static_branch_unlikely(&use_asid_allocator))
> +		__sbi_tlb_flush_range_asid(mm_cpumask(mm), 0, -1,
> +				atomic_long_read(&mm->context.id));
> +	else
> +		__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
>  }
>  
>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
>  {
> -	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
> +	if (static_branch_unlikely(&use_asid_allocator))
> +		__sbi_tlb_flush_range_asid(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE,
> +				atomic_long_read(&vma->vm_mm->context.id));
> +	else
> +		__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
>  }
>  
>  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>  		unsigned long end)
>  {
> -	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
> +	if (static_branch_unlikely(&use_asid_allocator))
> +		__sbi_tlb_flush_range_asid(mm_cpumask(vma->vm_mm), start, end - start,
> +				atomic_long_read(&vma->vm_mm->context.id));
> +	else
> +		__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);

Various overly long lines (which are trivially avoided when doing the
right thing from the beginning).
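For reference, one way to address both review points is to keep the ASID helpers private to arch/riscv/mm/tlbflush.c and fold the ASID case into the existing dispatcher, so each flush_tlb_*() caller stays a one-liner. The sketch below is illustrative only, not the applied patch: it changes __sbi_tlb_flush_range() to take the mm_struct instead of a cpumask, and the non-ASID local path (single-page flush vs. full flush) is assumed from the pre-patch code.

```c
/*
 * Sketch only: one dispatcher for both the ASID and non-ASID cases,
 * private to arch/riscv/mm/tlbflush.c.  Taking the mm means the ASID
 * is read in exactly one place and callers need no static-key checks.
 */
static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
				  unsigned long size)
{
	struct cpumask *cmask = mm_cpumask(mm);
	struct cpumask hmask;
	unsigned int cpuid;
	bool broadcast;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();
	/* Any hart in the mask besides this one forces an SBI broadcast. */
	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;

	if (static_branch_unlikely(&use_asid_allocator)) {
		unsigned long asid = atomic_long_read(&mm->context.id);

		if (broadcast) {
			riscv_cpuid_to_hartid_mask(cmask, &hmask);
			sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),
						   start, size, asid);
		} else if (size == -1) {
			local_flush_tlb_all_asid(asid);
		} else {
			/*
			 * The asymmetry the review asks to document: this
			 * loops sfence.vma page by page over any size, while
			 * the non-ASID path below gives up and flushes
			 * everything past a single page.
			 */
			local_flush_tlb_range_asid(start, size, asid);
		}
	} else {
		if (broadcast) {
			riscv_cpuid_to_hartid_mask(cmask, &hmask);
			sbi_remote_sfence_vma(cpumask_bits(&hmask),
					      start, size);
		} else if (size <= PAGE_SIZE) {
			local_flush_tlb_page(start);
		} else {
			local_flush_tlb_all();
		}
	}

	put_cpu();
}

/* Callers then stay short, with none of the quoted long lines, e.g.: */
void flush_tlb_mm(struct mm_struct *mm)
{
	__sbi_tlb_flush_range(mm, 0, -1);
}
```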
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index b065941..7030837 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -33,6 +33,8 @@ static inline int init_new_context(struct task_struct *tsk,
 	return 0;
 }
 
+DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
+
 #include <asm-generic/mmu_context.h>
 
 #endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index c84218a..894cf75 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -22,9 +22,31 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
 	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma x0, %0"
+			:
+			: "r" (asid)
+			: "memory");
+}
+
+static inline void local_flush_tlb_range_asid(unsigned long start,
+		unsigned long size, unsigned long asid)
+{
+	unsigned long tmp, end = ALIGN(start + size, PAGE_SIZE);
+
+	for (tmp = start & PAGE_MASK; tmp < end; tmp += PAGE_SIZE) {
+		__asm__ __volatile__ ("sfence.vma %0, %1"
+			:
+			: "r" (tmp), "r" (asid)
+			: "memory");
+	}
+}
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()			do { } while (0)
 #define local_flush_tlb_page(addr)		do { } while (0)
+#define local_flush_tlb_range_asid(addr)	do { } while (0)
 #endif /* CONFIG_MMU */
 
 #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 68aa312..45c1b04 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -18,7 +18,7 @@
 
 #ifdef CONFIG_MMU
 
-static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
+DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 
 static unsigned long asid_bits;
 static unsigned long num_asids;
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 720b443..87b4e52 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -4,6 +4,7 @@
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <asm/sbi.h>
+#include <asm/mmu_context.h>
 
 void flush_tlb_all(void)
 {
@@ -39,18 +40,57 @@ static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
 	put_cpu();
 }
 
+static void __sbi_tlb_flush_range_asid(struct cpumask *cmask,
+				unsigned long start,
+				unsigned long size,
+				unsigned long asid)
+{
+	struct cpumask hmask;
+	unsigned int cpuid;
+
+	if (cpumask_empty(cmask))
+		return;
+
+	cpuid = get_cpu();
+
+	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+		if (size == -1)
+			local_flush_tlb_all_asid(asid);
+		else
+			local_flush_tlb_range_asid(start, size, asid);
+	} else {
+		riscv_cpuid_to_hartid_mask(cmask, &hmask);
+		sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),
+				start, size, asid);
+	}
+
+	put_cpu();
+}
+
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
+	if (static_branch_unlikely(&use_asid_allocator))
+		__sbi_tlb_flush_range_asid(mm_cpumask(mm), 0, -1,
+				atomic_long_read(&mm->context.id));
+	else
+		__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
+	if (static_branch_unlikely(&use_asid_allocator))
+		__sbi_tlb_flush_range_asid(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE,
+				atomic_long_read(&vma->vm_mm->context.id));
+	else
+		__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end)
 {
-	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
+	if (static_branch_unlikely(&use_asid_allocator))
+		__sbi_tlb_flush_range_asid(mm_cpumask(vma->vm_mm), start, end - start,
+				atomic_long_read(&vma->vm_mm->context.id));
+	else
+		__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
 }
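One detail worth flagging in the tlbflush.h hunk above: the !CONFIG_MMU stub added there takes a single `addr` argument while the CONFIG_MMU inline takes three, and `local_flush_tlb_all_asid()` gets no stub at all. If the helpers were to stay in the header despite the review comment, matching stubs would presumably look like this sketch:

```c
#else /* CONFIG_MMU */
/* Stub arity must match the three-argument inline above, or any
 * !MMU caller would fail to expand the macro. */
#define local_flush_tlb_all_asid(asid)			do { } while (0)
#define local_flush_tlb_range_asid(start, size, asid)	do { } while (0)
#endif /* CONFIG_MMU */
```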