[v4,2/5] riscv: move switch_mm to its own file

Message ID 862300448c34e225f3fbb4da36ccf18d10753b0b.1553647082.git.gary@garyguo.net (mailing list archive)
State New, archived
Series TLB/I$ flush cleanups and improvements

Commit Message

Gary Guo March 27, 2019, 12:41 a.m. UTC
From: Gary Guo <gary@garyguo.net>

switch_mm is an expensive operation that has two users.
flush_icache_deferred is only called within switch_mm and can be moved
along with it. The function is expected to become more complicated when
ASID support is added, so clean it up eagerly.

By moving them to a separate file we also remove some excessive
dependencies on tlbflush.h and cacheflush.h.

Signed-off-by: Gary Guo <gary@garyguo.net>
---
 arch/riscv/include/asm/mmu_context.h | 59 +----------------------
 arch/riscv/mm/Makefile               |  1 +
 arch/riscv/mm/context.c              | 71 ++++++++++++++++++++++++++++
 3 files changed, 74 insertions(+), 57 deletions(-)
 create mode 100644 arch/riscv/mm/context.c

Comments

Christoph Hellwig March 27, 2019, 7:08 a.m. UTC | #1
> +#include <linux/mm.h>
> +
> +#include <asm/tlbflush.h>
> +#include <asm/cacheflush.h>

Minor nitpick: no real need for the empty line above.

Otherwise this looks good to me:

Reviewed-by: Christoph Hellwig <hch@lst.de>
Christoph Hellwig March 27, 2019, 7:18 a.m. UTC | #2
> +	csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
> +	local_flush_tlb_all();
> +
> +	flush_icache_deferred(next);
> +}
> +

Btw, git-am complains about adding a blank line at EOF here, please
consider dropping that empty line.
Anup Patel March 28, 2019, 6:47 a.m. UTC | #3
On Wed, Mar 27, 2019 at 6:11 AM Gary Guo <gary@garyguo.net> wrote:
>
> From: Gary Guo <gary@garyguo.net>
>
> switch_mm is an expensive operation that has two users.
> flush_icache_deferred is only called within switch_mm and can be moved
> along with it. The function is expected to become more complicated when
> ASID support is added, so clean it up eagerly.
>
> By moving them to a separate file we also remove some excessive
> dependencies on tlbflush.h and cacheflush.h.
>
> Signed-off-by: Gary Guo <gary@garyguo.net>
> ---
>  arch/riscv/include/asm/mmu_context.h | 59 +----------------------
>  arch/riscv/mm/Makefile               |  1 +
>  arch/riscv/mm/context.c              | 71 ++++++++++++++++++++++++++++
>  3 files changed, 74 insertions(+), 57 deletions(-)
>  create mode 100644 arch/riscv/mm/context.c
>
> diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
> index 336d60ec5698..bf4f097a9051 100644
> --- a/arch/riscv/include/asm/mmu_context.h
> +++ b/arch/riscv/include/asm/mmu_context.h
> @@ -20,8 +20,6 @@
>
>  #include <linux/mm.h>
>  #include <linux/sched.h>
> -#include <asm/tlbflush.h>
> -#include <asm/cacheflush.h>
>
>  static inline void enter_lazy_tlb(struct mm_struct *mm,
>         struct task_struct *task)
> @@ -39,61 +37,8 @@ static inline void destroy_context(struct mm_struct *mm)
>  {
>  }
>
> -/*
> - * When necessary, performs a deferred icache flush for the given MM context,
> - * on the local CPU.  RISC-V has no direct mechanism for instruction cache
> - * shoot downs, so instead we send an IPI that informs the remote harts they
> - * need to flush their local instruction caches.  To avoid pathologically slow
> - * behavior in a common case (a bunch of single-hart processes on a many-hart
> - * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
> - * executing a MM context and instead schedule a deferred local instruction
> - * cache flush to be performed before execution resumes on each hart.  This
> - * actually performs that local instruction cache flush, which implicitly only
> - * refers to the current hart.
> - */
> -static inline void flush_icache_deferred(struct mm_struct *mm)
> -{
> -#ifdef CONFIG_SMP
> -       unsigned int cpu = smp_processor_id();
> -       cpumask_t *mask = &mm->context.icache_stale_mask;
> -
> -       if (cpumask_test_cpu(cpu, mask)) {
> -               cpumask_clear_cpu(cpu, mask);
> -               /*
> -                * Ensure the remote hart's writes are visible to this hart.
> -                * This pairs with a barrier in flush_icache_mm.
> -                */
> -               smp_mb();
> -               local_flush_icache_all();
> -       }
> -#endif
> -}
> -
> -static inline void switch_mm(struct mm_struct *prev,
> -       struct mm_struct *next, struct task_struct *task)
> -{
> -       if (likely(prev != next)) {
> -               /*
> -                * Mark the current MM context as inactive, and the next as
> -                * active.  This is at least used by the icache flushing
> -                * routines in order to determine who should
> -                */
> -               unsigned int cpu = smp_processor_id();
> -
> -               cpumask_clear_cpu(cpu, mm_cpumask(prev));
> -               cpumask_set_cpu(cpu, mm_cpumask(next));
> -
> -               /*
> -                * Use the old spbtr name instead of using the current satp
> -                * name to support binutils 2.29 which doesn't know about the
> -                * privileged ISA 1.10 yet.
> -                */
> -               csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
> -               local_flush_tlb_all();
> -
> -               flush_icache_deferred(next);
> -       }
> -}
> +void switch_mm(struct mm_struct *prev, struct mm_struct *next,
> +       struct task_struct *task);
>
>  static inline void activate_mm(struct mm_struct *prev,
>                                struct mm_struct *next)
> diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
> index eb22ab49b3e0..d75b035786d6 100644
> --- a/arch/riscv/mm/Makefile
> +++ b/arch/riscv/mm/Makefile
> @@ -3,3 +3,4 @@ obj-y += fault.o
>  obj-y += extable.o
>  obj-y += ioremap.o
>  obj-y += cacheflush.o
> +obj-y += context.o
> diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
> new file mode 100644
> index 000000000000..fbb1cfe80267
> --- /dev/null
> +++ b/arch/riscv/mm/context.c
> @@ -0,0 +1,71 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (C) 2012 Regents of the University of California
> + * Copyright (C) 2017 SiFive
> + */
> +
> +#include <linux/mm.h>
> +
> +#include <asm/tlbflush.h>
> +#include <asm/cacheflush.h>
> +
> +/*
> + * When necessary, performs a deferred icache flush for the given MM context,
> + * on the local CPU.  RISC-V has no direct mechanism for instruction cache
> + * shoot downs, so instead we send an IPI that informs the remote harts they
> + * need to flush their local instruction caches.  To avoid pathologically slow
> + * behavior in a common case (a bunch of single-hart processes on a many-hart
> + * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
> + * executing a MM context and instead schedule a deferred local instruction
> + * cache flush to be performed before execution resumes on each hart.  This
> + * actually performs that local instruction cache flush, which implicitly only
> + * refers to the current hart.
> + */
> +static inline void flush_icache_deferred(struct mm_struct *mm)
> +{
> +#ifdef CONFIG_SMP
> +       unsigned int cpu = smp_processor_id();
> +       cpumask_t *mask = &mm->context.icache_stale_mask;
> +
> +       if (cpumask_test_cpu(cpu, mask)) {
> +               cpumask_clear_cpu(cpu, mask);
> +               /*
> +                * Ensure the remote hart's writes are visible to this hart.
> +                * This pairs with a barrier in flush_icache_mm.
> +                */
> +               smp_mb();
> +               local_flush_icache_all();
> +       }
> +
> +#endif
> +}
> +
> +void switch_mm(struct mm_struct *prev, struct mm_struct *next,
> +       struct task_struct *task)
> +{
> +       unsigned int cpu;
> +
> +       if (unlikely(prev == next))
> +               return;
> +
> +       /*
> +        * Mark the current MM context as inactive, and the next as
> +        * active.  This is at least used by the icache flushing
> +        * routines in order to determine who should be flushed.
> +        */
> +       cpu = smp_processor_id();
> +
> +       cpumask_clear_cpu(cpu, mm_cpumask(prev));
> +       cpumask_set_cpu(cpu, mm_cpumask(next));
> +
> +       /*
> +        * Use the old spbtr name instead of using the current satp
> +        * name to support binutils 2.29 which doesn't know about the
> +        * privileged ISA 1.10 yet.
> +        */
> +       csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
> +       local_flush_tlb_all();
> +
> +       flush_icache_deferred(next);
> +}
> +
> --
> 2.17.1
>
>
> _______________________________________________
> linux-riscv mailing list
> linux-riscv@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-riscv

Apart from the nit pointed out by Christoph, this looks good to me.

Reviewed-by: Anup Patel <anup@brainfault.org>

Regards,
Anup
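
As an aside for readers skimming the archive: the flush_icache_deferred()
comment in the patch below describes a split between harts that are
currently running the mm (flushed right away, via IPI) and harts that are
not (merely marked stale, and flushed lazily the next time switch_mm()
brings the mm back onto them). The following is a minimal, self-contained
userspace model of that protocol, not the kernel code itself; the names
running_mm, icache_stale, flush_icache_mm and switch_mm_to are illustrative
placeholders used only for this sketch.

/*
 * Toy model of the deferred icache-flush protocol: "harts" are array
 * slots and a flush is a printf.  In the kernel, the immediate case is
 * an IPI to harts in mm_cpumask(), and the deferred case is the bit in
 * context.icache_stale_mask checked by flush_icache_deferred().
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_HARTS 4

static bool running_mm[NR_HARTS];   /* hart currently executes this mm  */
static bool icache_stale[NR_HARTS]; /* deferred-flush marker, per hart  */

static void local_flush_icache(int hart)
{
	printf("hart %d: local icache flush\n", hart);
}

/* Writer side: after modifying code pages, flush where it matters now. */
static void flush_icache_mm(void)
{
	for (int hart = 0; hart < NR_HARTS; hart++) {
		if (running_mm[hart])
			local_flush_icache(hart);  /* kernel: send an IPI   */
		else
			icache_stale[hart] = true; /* defer until switch_mm */
	}
}

/* Scheduler side: the deferred flush happens when the mm comes back. */
static void switch_mm_to(int hart)
{
	running_mm[hart] = true;
	if (icache_stale[hart]) {
		icache_stale[hart] = false;
		local_flush_icache(hart);
	}
}

int main(void)
{
	running_mm[0] = true;  /* only hart 0 runs the mm right now       */
	flush_icache_mm();     /* hart 0 flushes now; others marked stale */
	switch_mm_to(2);       /* hart 2 picks up the mm: deferred flush  */
	return 0;
}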

Patch

diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 336d60ec5698..bf4f097a9051 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -20,8 +20,6 @@ 
 
 #include <linux/mm.h>
 #include <linux/sched.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
 
 static inline void enter_lazy_tlb(struct mm_struct *mm,
 	struct task_struct *task)
@@ -39,61 +37,8 @@  static inline void destroy_context(struct mm_struct *mm)
 {
 }
 
-/*
- * When necessary, performs a deferred icache flush for the given MM context,
- * on the local CPU.  RISC-V has no direct mechanism for instruction cache
- * shoot downs, so instead we send an IPI that informs the remote harts they
- * need to flush their local instruction caches.  To avoid pathologically slow
- * behavior in a common case (a bunch of single-hart processes on a many-hart
- * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
- * executing a MM context and instead schedule a deferred local instruction
- * cache flush to be performed before execution resumes on each hart.  This
- * actually performs that local instruction cache flush, which implicitly only
- * refers to the current hart.
- */
-static inline void flush_icache_deferred(struct mm_struct *mm)
-{
-#ifdef CONFIG_SMP
-	unsigned int cpu = smp_processor_id();
-	cpumask_t *mask = &mm->context.icache_stale_mask;
-
-	if (cpumask_test_cpu(cpu, mask)) {
-		cpumask_clear_cpu(cpu, mask);
-		/*
-		 * Ensure the remote hart's writes are visible to this hart.
-		 * This pairs with a barrier in flush_icache_mm.
-		 */
-		smp_mb();
-		local_flush_icache_all();
-	}
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-	struct mm_struct *next, struct task_struct *task)
-{
-	if (likely(prev != next)) {
-		/*
-		 * Mark the current MM context as inactive, and the next as
-		 * active.  This is at least used by the icache flushing
-		 * routines in order to determine who should
-		 */
-		unsigned int cpu = smp_processor_id();
-
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
-		cpumask_set_cpu(cpu, mm_cpumask(next));
-
-		/*
-		 * Use the old spbtr name instead of using the current satp
-		 * name to support binutils 2.29 which doesn't know about the
-		 * privileged ISA 1.10 yet.
-		 */
-		csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
-		local_flush_tlb_all();
-
-		flush_icache_deferred(next);
-	}
-}
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	struct task_struct *task);
 
 static inline void activate_mm(struct mm_struct *prev,
 			       struct mm_struct *next)
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index eb22ab49b3e0..d75b035786d6 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -3,3 +3,4 @@  obj-y += fault.o
 obj-y += extable.o
 obj-y += ioremap.o
 obj-y += cacheflush.o
+obj-y += context.o
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
new file mode 100644
index 000000000000..fbb1cfe80267
--- /dev/null
+++ b/arch/riscv/mm/context.c
@@ -0,0 +1,71 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/mm.h>
+
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU.  RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches.  To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart.  This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+	unsigned int cpu = smp_processor_id();
+	cpumask_t *mask = &mm->context.icache_stale_mask;
+
+	if (cpumask_test_cpu(cpu, mask)) {
+		cpumask_clear_cpu(cpu, mask);
+		/*
+		 * Ensure the remote hart's writes are visible to this hart.
+		 * This pairs with a barrier in flush_icache_mm.
+		 */
+		smp_mb();
+		local_flush_icache_all();
+	}
+
+#endif
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	struct task_struct *task)
+{
+	unsigned int cpu;
+
+	if (unlikely(prev == next))
+		return;
+
+	/*
+	 * Mark the current MM context as inactive, and the next as
+	 * active.  This is at least used by the icache flushing
+	 * routines in order to determine who should be flushed.
+	 */
+	cpu = smp_processor_id();
+
+	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+
+	/*
+	 * Use the old spbtr name instead of using the current satp
+	 * name to support binutils 2.29 which doesn't know about the
+	 * privileged ISA 1.10 yet.
+	 */
+	csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+	local_flush_tlb_all();
+
+	flush_icache_deferred(next);
+}
+