
riscv: move flush_icache_{all,mm} code to proper location

Message ID LO2P265MB0847855A503C55BB11A59A89D6730@LO2P265MB0847.GBRP265.PROD.OUTLOOK.COM (mailing list archive)
State New, archived
Series riscv: move flush_icache_{all,mm} code to proper location

Commit Message

Gary Guo March 6, 2019, 5 p.m. UTC
Currently, flush_icache_all is macro-expanded into an SBI call, yet
asm/sbi.h is not included in asm/cacheflush.h. It can be moved to
mm/cacheflush.c instead (the SBI call will dominate performance-wise,
so there is no concern about it not being inlined).

Currently, flush_icache_mm stays in kernel/smp.c, which looks like a hack
to prevent it from being compiled when CONFIG_SMP=n. It should also be in
mm/cacheflush.c.

Signed-off-by: Gary Guo <gary@garyguo.net>
---
 arch/riscv/include/asm/cacheflush.h |  2 +-
 arch/riscv/kernel/smp.c             | 49 -----------------------
 arch/riscv/mm/cacheflush.c          | 61 +++++++++++++++++++++++++++++
 3 files changed, 62 insertions(+), 50 deletions(-)

Comments

Christoph Hellwig March 8, 2019, 2:24 p.m. UTC | #1
On Wed, Mar 06, 2019 at 05:00:21PM +0000, Gary Guo wrote:
> Currently, flush_icache_all is macro-expanded into an SBI call, yet
> asm/sbi.h is not included in asm/cacheflush.h. It can be moved to
> mm/cacheflush.c instead (the SBI call will dominate performance-wise,
> so there is no concern about it not being inlined).
> 
> Currently, flush_icache_mm stays in kernel/smp.c, which looks like a hack
> to prevent it from being compiled when CONFIG_SMP=n. It should also be in
> mm/cacheflush.c.

The non-SMP version of flush_icache_mm should also move to cacheflush.c.

Please make this flush_icache_mm move one patch, and then a second one on
top of that that just removes flush_icache_all and open-codes the two
users.
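
(For illustration: open-coding flush_icache_all at a call site would amount
to invoking the SBI remote fence directly. A sketch only, assuming the
caller can include asm/sbi.h; the surrounding function is hypothetical.)

#include <asm/sbi.h>

/* Hypothetical call site, shown only to illustrate the open-coding. */
static void hypothetical_caller(void)
{
	/* was: flush_icache_all(); */
	sbi_remote_fence_i(NULL);	/* fence.i on all harts via the SBI */
}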
Gary Guo March 8, 2019, 3:23 p.m. UTC | #2
Thanks for the reply. I wonder what the purpose of moving the non-SMP 
version is? The non-SMP version is just a plain fence.i instruction that 
could be inlined directly, isn't it?
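
(For reference, the non-SMP definitions under discussion look roughly like
this in asm/cacheflush.h at the time: a single fence.i on the local hart,
cheap enough to keep inline.)

/* UP case: a fence.i on the local hart is all that is needed. */
static inline void local_flush_icache_all(void)
{
	asm volatile ("fence.i" ::: "memory");
}

#ifndef CONFIG_SMP
#define flush_icache_all() local_flush_icache_all()
#define flush_icache_mm(mm, local) flush_icache_all()
#endif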

On 08/03/2019 14:24, Christoph Hellwig wrote:
> On Wed, Mar 06, 2019 at 05:00:21PM +0000, Gary Guo wrote:
>> Currently, flush_icache_all is macro-expanded into an SBI call, yet
>> asm/sbi.h is not included in asm/cacheflush.h. It can be moved to
>> mm/cacheflush.c instead (the SBI call will dominate performance-wise,
>> so there is no concern about it not being inlined).
>>
>> Currently, flush_icache_mm stays in kernel/smp.c, which looks like a hack
>> to prevent it from being compiled when CONFIG_SMP=n. It should also be in
>> mm/cacheflush.c.
> 
> The non-SMP version of flush_icache_mm should also move to cacheflush.c.
> 
> Please make this flush_icache_mm move one patch, and then a second one on
> top of that that just removes flush_icache_all and open-codes the two
> users.
>
Christoph Hellwig March 8, 2019, 4:28 p.m. UTC | #3
On Fri, Mar 08, 2019 at 03:23:22PM +0000, Gary Guo wrote:
> Thanks for the reply. I wonder what the purpose of moving the non-SMP 
> version is? The non-SMP version is just a plain fence.i instruction that 
> could be inlined directly, isn't it?

Sorry, I missed a few additional users; I'm fine with keeping it inline.
Andreas Schwab May 21, 2019, 10:38 a.m. UTC | #4
On Mar 06 2019, Gary Guo <gary@garyguo.net> wrote:

> diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
> index 8f13074413a7..1f4ba68ab9aa 100644
> --- a/arch/riscv/include/asm/cacheflush.h
> +++ b/arch/riscv/include/asm/cacheflush.h
> @@ -47,7 +47,7 @@ static inline void flush_dcache_page(struct page *page)
>  
>  #else /* CONFIG_SMP */
>  
> -#define flush_icache_all() sbi_remote_fence_i(NULL)
> +void flush_icache_all(void);
>  void flush_icache_mm(struct mm_struct *mm, bool local);

ERROR: "flush_icache_all" [drivers/misc/lkdtm/lkdtm.ko] undefined!

Andreas.
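
(Since flush_icache_all() is no longer inline, modules such as lkdtm have
no definition to link against. A likely fix, sketched here rather than
taken from the merged change, is to export the symbol from mm/cacheflush.c:)

#include <linux/export.h>	/* for EXPORT_SYMBOL */

void flush_icache_all(void)
{
	sbi_remote_fence_i(NULL);
}
EXPORT_SYMBOL(flush_icache_all);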

Patch

diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 8f13074413a7..1f4ba68ab9aa 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -47,7 +47,7 @@  static inline void flush_dcache_page(struct page *page)
 
 #else /* CONFIG_SMP */
 
-#define flush_icache_all() sbi_remote_fence_i(NULL)
+void flush_icache_all(void);
 void flush_icache_mm(struct mm_struct *mm, bool local);
 
 #endif /* CONFIG_SMP */
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 57b1383e5ef7..f066344aaf42 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -162,52 +162,3 @@  void smp_send_reschedule(int cpu)
 	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-/*
- * Performs an icache flush for the given MM context.  RISC-V has no direct
- * mechanism for instruction cache shoot downs, so instead we send an IPI that
- * informs the remote harts they need to flush their local instruction caches.
- * To avoid pathologically slow behavior in a common case (a bunch of
- * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
- * IPIs for harts that are not currently executing a MM context and instead
- * schedule a deferred local instruction cache flush to be performed before
- * execution resumes on each hart.
- */
-void flush_icache_mm(struct mm_struct *mm, bool local)
-{
-	unsigned int cpu;
-	cpumask_t others, hmask, *mask;
-
-	preempt_disable();
-
-	/* Mark every hart's icache as needing a flush for this MM. */
-	mask = &mm->context.icache_stale_mask;
-	cpumask_setall(mask);
-	/* Flush this hart's I$ now, and mark it as flushed. */
-	cpu = smp_processor_id();
-	cpumask_clear_cpu(cpu, mask);
-	local_flush_icache_all();
-
-	/*
-	 * Flush the I$ of other harts concurrently executing, and mark them as
-	 * flushed.
-	 */
-	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
-	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local) {
-		cpumask_clear(&hmask);
-		riscv_cpuid_to_hartid_mask(&others, &hmask);
-		sbi_remote_fence_i(hmask.bits);
-	} else {
-		/*
-		 * It's assumed that at least one strongly ordered operation is
-		 * performed on this hart between setting a hart's cpumask bit
-		 * and scheduling this MM context on that hart.  Sending an SBI
-		 * remote message will do this, but in the case where no
-		 * messages are sent we still need to order this hart's writes
-		 * with flush_icache_deferred().
-		 */
-		smp_mb();
-	}
-
-	preempt_enable();
-}
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 498c0a0814fe..497b7d07af0c 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -14,6 +14,67 @@ 
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+void flush_icache_all(void)
+{
+	sbi_remote_fence_i(NULL);
+}
+
+/*
+ * Performs an icache flush for the given MM context.  RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+	unsigned int cpu;
+	cpumask_t others, hmask, *mask;
+
+	preempt_disable();
+
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_flush_icache_all();
+
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+	local |= cpumask_empty(&others);
+	if (mm != current->active_mm || !local) {
+		cpumask_clear(&hmask);
+		riscv_cpuid_to_hartid_mask(&others, &hmask);
+		sbi_remote_fence_i(hmask.bits);
+	} else {
+		/*
+		 * It's assumed that at least one strongly ordered operation is
+		 * performed on this hart between setting a hart's cpumask bit
+		 * and scheduling this MM context on that hart.  Sending an SBI
+		 * remote message will do this, but in the case where no
+		 * messages are sent we still need to order this hart's writes
+		 * with flush_icache_deferred().
+		 */
+		smp_mb();
+	}
+
+	preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
+
 void flush_icache_pte(pte_t pte)
 {
 	struct page *page = pte_page(pte);