Message ID | d141829022c075172b1410e67299fc29fa95c6cd.1553647082.git.gary@garyguo.net (mailing list archive)
---|---
State | New, archived
Series | TLB/I$ flush cleanups and improvements
Looks good,
Reviewed-by: Christoph Hellwig <hch@lst.de>
On Wed, Mar 27, 2019 at 6:11 AM Gary Guo <gary@garyguo.net> wrote:
>
> From: Gary Guo <gary@garyguo.net>
>
> Currently, flush_icache_all is macro-expanded into an SBI call, yet no
> asm/sbi.h is included in asm/cacheflush.h. It can be moved to
> mm/cacheflush.c instead (the SBI call will dominate performance-wise,
> so there is no concern about it not being inlined).
>
> Currently, flush_icache_mm stays in kernel/smp.c, which looks like a
> hack to prevent it from being compiled when CONFIG_SMP=n. It should
> also be in mm/cacheflush.c.
>
> Signed-off-by: Gary Guo <gary@garyguo.net>
> ---
>  arch/riscv/include/asm/cacheflush.h |  2 +-
>  arch/riscv/kernel/smp.c             | 49 -----------------------
>  arch/riscv/mm/cacheflush.c          | 61 +++++++++++++++++++++++++++++
>  3 files changed, 62 insertions(+), 50 deletions(-)
>
> [...]

LGTM.

Reviewed-by: Anup Patel <anup@brainfault.org>

Regards,
Anup
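For readers unfamiliar with the header being touched: the first hunk below only changes the CONFIG_SMP branch of asm/cacheflush.h. The CONFIG_SMP=n branch is not part of this diff; from memory it simply falls back to purely local flushes, roughly as sketched here, which is why no SBI call (and no asm/sbi.h) is needed on UP and why the SMP-only implementation had been parked in kernel/smp.c. Treat the exact macro bodies as an assumption, not a quote from this series.

/* Sketch of the UP (!CONFIG_SMP) fallbacks in asm/cacheflush.h (assumed, illustrative only): */
#define flush_icache_all()		local_flush_icache_all()
#define flush_icache_mm(mm, local)	flush_icache_all()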
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 8f13074413a7..1f4ba68ab9aa 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -47,7 +47,7 @@ static inline void flush_dcache_page(struct page *page)
 
 #else /* CONFIG_SMP */
 
-#define flush_icache_all() sbi_remote_fence_i(NULL)
+void flush_icache_all(void);
 void flush_icache_mm(struct mm_struct *mm, bool local);
 
 #endif /* CONFIG_SMP */
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 0c41d07ec281..17f491e8ed0a 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -199,52 +199,3 @@ void smp_send_reschedule(int cpu)
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-/*
- * Performs an icache flush for the given MM context. RISC-V has no direct
- * mechanism for instruction cache shoot downs, so instead we send an IPI that
- * informs the remote harts they need to flush their local instruction caches.
- * To avoid pathologically slow behavior in a common case (a bunch of
- * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
- * IPIs for harts that are not currently executing a MM context and instead
- * schedule a deferred local instruction cache flush to be performed before
- * execution resumes on each hart.
- */
-void flush_icache_mm(struct mm_struct *mm, bool local)
-{
-	unsigned int cpu;
-	cpumask_t others, hmask, *mask;
-
-	preempt_disable();
-
-	/* Mark every hart's icache as needing a flush for this MM. */
-	mask = &mm->context.icache_stale_mask;
-	cpumask_setall(mask);
-	/* Flush this hart's I$ now, and mark it as flushed. */
-	cpu = smp_processor_id();
-	cpumask_clear_cpu(cpu, mask);
-	local_flush_icache_all();
-
-	/*
-	 * Flush the I$ of other harts concurrently executing, and mark them as
-	 * flushed.
-	 */
-	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
-	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local) {
-		cpumask_clear(&hmask);
-		riscv_cpuid_to_hartid_mask(&others, &hmask);
-		sbi_remote_fence_i(hmask.bits);
-	} else {
-		/*
-		 * It's assumed that at least one strongly ordered operation is
-		 * performed on this hart between setting a hart's cpumask bit
-		 * and scheduling this MM context on that hart. Sending an SBI
-		 * remote message will do this, but in the case where no
-		 * messages are sent we still need to order this hart's writes
-		 * with flush_icache_deferred().
-		 */
-		smp_mb();
-	}
-
-	preempt_enable();
-}
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 498c0a0814fe..497b7d07af0c 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -14,6 +14,67 @@
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+void flush_icache_all(void)
+{
+	sbi_remote_fence_i(NULL);
+}
+
+/*
+ * Performs an icache flush for the given MM context. RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+	unsigned int cpu;
+	cpumask_t others, hmask, *mask;
+
+	preempt_disable();
+
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_flush_icache_all();
+
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+	local |= cpumask_empty(&others);
+	if (mm != current->active_mm || !local) {
+		cpumask_clear(&hmask);
+		riscv_cpuid_to_hartid_mask(&others, &hmask);
+		sbi_remote_fence_i(hmask.bits);
+	} else {
+		/*
+		 * It's assumed that at least one strongly ordered operation is
+		 * performed on this hart between setting a hart's cpumask bit
+		 * and scheduling this MM context on that hart. Sending an SBI
+		 * remote message will do this, but in the case where no
+		 * messages are sent we still need to order this hart's writes
+		 * with flush_icache_deferred().
+		 */
+		smp_mb();
+	}
+
+	preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
+
 void flush_icache_pte(pte_t pte)
 {
 	struct page *page = pte_page(pte);
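The comments above lean on a deferred flush performed "before execution resumes on each hart", handled by flush_icache_deferred() on the context-switch path. That helper is named in the comment but not shown in this patch; the following is a minimal sketch of what such a hook looks like, assuming it is invoked when switching to the incoming mm and that the per-mm stale mask is mm->context.icache_stale_mask as used above. Treat it as an illustration of the mechanism, not the exact in-tree code.

/*
 * Illustrative sketch (not part of this patch): run on the context-switch
 * path for the mm being switched in. If this hart's bit is still set in
 * icache_stale_mask, the eager flush in flush_icache_mm() skipped it, so
 * flush the local I$ before any user code from this mm executes here.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Make the writer's stores to the code pages visible before
		 * the local fence.i; pairs with the smp_mb() in
		 * flush_icache_mm().
		 */
		smp_mb();
		local_flush_icache_all();
	}
#endif
}

With a hook like this in place, harts that were not running the mm when flush_icache_mm() was called pick up the flush lazily the next time they schedule that mm, which is what makes skipping the IPIs in the common single-hart case safe.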