diff mbox series

[v2,2/6] riscv: Deduplicate IRQ stack switching

Message ID 20230815203442.1608773-10-samitolvanen@google.com (mailing list archive)
State Superseded
Headers show
Series riscv: SCS support | expand

Checks

Context Check Description
conchuod/cover_letter success Series has a cover letter
conchuod/tree_selection success Guessed tree name to be for-next at HEAD 174e8ac0272d
conchuod/fixes_present success Fixes tag not required for -next series
conchuod/maintainers_pattern success MAINTAINERS pattern errors before the patch: 4 and now 4
conchuod/verify_signedoff success Signed-off-by tag matches author and committer
conchuod/kdoc success Errors and warnings before: 0 this patch: 0
conchuod/build_rv64_clang_allmodconfig success Errors and warnings before: 2810 this patch: 2810
conchuod/module_param success Was 0 now: 0
conchuod/build_rv64_gcc_allmodconfig fail Errors and warnings before: 15878 this patch: 15879
conchuod/build_rv32_defconfig success Build OK
conchuod/dtb_warn_rv64 success Errors and warnings before: 12 this patch: 12
conchuod/header_inline success No static functions without inline keyword in header files
conchuod/checkpatch success total: 0 errors, 0 warnings, 0 checks, 133 lines checked
conchuod/build_rv64_nommu_k210_defconfig success Build OK
conchuod/verify_fixes success No Fixes tag
conchuod/build_rv64_nommu_virt_defconfig success Build OK

Commit Message

Sami Tolvanen Aug. 15, 2023, 8:34 p.m. UTC
With CONFIG_IRQ_STACKS, we switch to a separate per-CPU IRQ stack
before calling handle_riscv_irq or __do_softirq. We currently
have duplicate inline assembly snippets for stack switching in
both code paths. Now that we can access per-CPU variables in
assembly, implement call_on_irq_stack in assembly, and use that
instead of redundant inline assembly.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
---
 arch/riscv/include/asm/asm.h       |  5 +++++
 arch/riscv/include/asm/irq_stack.h |  3 +++
 arch/riscv/kernel/entry.S          | 32 ++++++++++++++++++++++++++++++
 arch/riscv/kernel/irq.c            | 32 ++++++++----------------------
 arch/riscv/kernel/traps.c          | 29 ++++-----------------------
 5 files changed, 52 insertions(+), 49 deletions(-)

Comments

Clément Léger Aug. 24, 2023, 8:12 a.m. UTC | #1
On 15/08/2023 22:34, Sami Tolvanen wrote:
> With CONFIG_IRQ_STACKS, we switch to a separate per-CPU IRQ stack
> before calling handle_riscv_irq or __do_softirq. We currently
> have duplicate inline assembly snippets for stack switching in
> both code paths. Now that we can access per-CPU variables in
> assembly, implement call_on_irq_stack in assembly, and use that
> instead of redundant inline assembly.
> 
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
> ---
>  arch/riscv/include/asm/asm.h       |  5 +++++
>  arch/riscv/include/asm/irq_stack.h |  3 +++
>  arch/riscv/kernel/entry.S          | 32 ++++++++++++++++++++++++++++++
>  arch/riscv/kernel/irq.c            | 32 ++++++++----------------------
>  arch/riscv/kernel/traps.c          | 29 ++++-----------------------
>  5 files changed, 52 insertions(+), 49 deletions(-)
> 
> diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
> index bfb4c26f113c..8e446be2d57c 100644
> --- a/arch/riscv/include/asm/asm.h
> +++ b/arch/riscv/include/asm/asm.h
> @@ -104,6 +104,11 @@
>  .endm
>  #endif /* CONFIG_SMP */
>  
> +.macro load_per_cpu dst ptr tmp
> +	asm_per_cpu \dst \ptr \tmp
> +	REG_L \dst, 0(\dst)
> +.endm
> +
>  	/* save all GPs except x1 ~ x5 */
>  	.macro save_from_x6_to_x31
>  	REG_S x6,  PT_T1(sp)
> diff --git a/arch/riscv/include/asm/irq_stack.h b/arch/riscv/include/asm/irq_stack.h
> index e4042d297580..6441ded3b0cf 100644
> --- a/arch/riscv/include/asm/irq_stack.h
> +++ b/arch/riscv/include/asm/irq_stack.h
> @@ -12,6 +12,9 @@
>  
>  DECLARE_PER_CPU(ulong *, irq_stack_ptr);
>  
> +asmlinkage void call_on_irq_stack(struct pt_regs *regs,
> +				  void (*func)(struct pt_regs *));
> +
>  #ifdef CONFIG_VMAP_STACK
>  /*
>   * To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd
> diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
> index 3d11aa3af105..39875f5e08a6 100644
> --- a/arch/riscv/kernel/entry.S
> +++ b/arch/riscv/kernel/entry.S
> @@ -218,6 +218,38 @@ SYM_CODE_START(ret_from_fork)
>  	tail syscall_exit_to_user_mode
>  SYM_CODE_END(ret_from_fork)
>  
> +#ifdef CONFIG_IRQ_STACKS
> +/*
> + * void call_on_irq_stack(struct pt_regs *regs,
> + * 		          void (*func)(struct pt_regs *));
> + *
> + * Calls func(regs) using the per-CPU IRQ stack.
> + */
> +SYM_FUNC_START(call_on_irq_stack)
> +	/* Create a frame record to save ra and s0 (fp) */
> +	addi	sp, sp, -RISCV_SZPTR
> +	REG_S	ra, (sp)
> +	addi	sp, sp, -RISCV_SZPTR
> +	REG_S	s0, (sp)
> +	addi	s0, sp, 2*RISCV_SZPTR

Hi Sami,

Some defines for stack frame offsets could be added in asm-offsets for
the offsets:

	DEFINE(STACKFRAME_SIZE_ON_STACK, ALIGN(sizeof(struct stackframe),
STACK_ALIGN));
	OFFSET(STACKFRAME_FP, stackframe, fp);
	OFFSET(STACKFRAME_RA, stackframe, ra);

And you can probably increment the stack at once (saving two addi in
prologue/epilogue) for both RA and FP and reuse the asm-offsets defines:

	addi	sp, sp, -STACKFRAME_SIZE_ON_STACK
	REG_S	ra, STACKFRAME_RA(sp)
	REG_S	s0, STACKFRAME_FP(sp)
	addi	s0, sp, STACKFRAME_SIZE_ON_STACK

Clément

> +
> +	/* Switch to the per-CPU IRQ stack and call the handler */
> +	load_per_cpu t0, irq_stack_ptr, t1
> +	li	t1, IRQ_STACK_SIZE
> +	add	sp, t0, t1
> +	jalr	a1
> +
> +	/* Switch back to the thread stack and restore ra and s0 */
> +	addi	sp, s0, -2*RISCV_SZPTR
> +	REG_L	s0, (sp)
> +	addi	sp, sp, RISCV_SZPTR
> +	REG_L	ra, (sp)
> +	addi	sp, sp, RISCV_SZPTR
> +
> +	ret
> +SYM_FUNC_END(call_on_irq_stack)
> +#endif /* CONFIG_IRQ_STACKS */
> +
>  /*
>   * Integer register context switch
>   * The callee-saved registers must be saved and restored.
> diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
> index d0577cc6a081..95dafdcbd135 100644
> --- a/arch/riscv/kernel/irq.c
> +++ b/arch/riscv/kernel/irq.c
> @@ -61,32 +61,16 @@ static void init_irq_stacks(void)
>  #endif /* CONFIG_VMAP_STACK */
>  
>  #ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
> +static void ___do_softirq(struct pt_regs *regs)
> +{
> +	__do_softirq();
> +}
> +
>  void do_softirq_own_stack(void)
>  {
> -#ifdef CONFIG_IRQ_STACKS
> -	if (on_thread_stack()) {
> -		ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
> -					+ IRQ_STACK_SIZE/sizeof(ulong);
> -		__asm__ __volatile(
> -		"addi	sp, sp, -"RISCV_SZPTR  "\n"
> -		REG_S"  ra, (sp)		\n"
> -		"addi	sp, sp, -"RISCV_SZPTR  "\n"
> -		REG_S"  s0, (sp)		\n"
> -		"addi	s0, sp, 2*"RISCV_SZPTR "\n"
> -		"move	sp, %[sp]		\n"
> -		"call	__do_softirq		\n"
> -		"addi	sp, s0, -2*"RISCV_SZPTR"\n"
> -		REG_L"  s0, (sp)		\n"
> -		"addi	sp, sp, "RISCV_SZPTR   "\n"
> -		REG_L"  ra, (sp)		\n"
> -		"addi	sp, sp, "RISCV_SZPTR   "\n"
> -		:
> -		: [sp] "r" (sp)
> -		: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
> -		  "t0", "t1", "t2", "t3", "t4", "t5", "t6",
> -		  "memory");
> -	} else
> -#endif
> +	if (on_thread_stack())
> +		call_on_irq_stack(NULL, ___do_softirq);
> +	else
>  		__do_softirq();
>  }
>  #endif /* CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK */
> diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
> index deb2144d9143..83319b6816da 100644
> --- a/arch/riscv/kernel/traps.c
> +++ b/arch/riscv/kernel/traps.c
> @@ -350,31 +350,10 @@ static void noinstr handle_riscv_irq(struct pt_regs *regs)
>  asmlinkage void noinstr do_irq(struct pt_regs *regs)
>  {
>  	irqentry_state_t state = irqentry_enter(regs);
> -#ifdef CONFIG_IRQ_STACKS
> -	if (on_thread_stack()) {
> -		ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
> -					+ IRQ_STACK_SIZE/sizeof(ulong);
> -		__asm__ __volatile(
> -		"addi	sp, sp, -"RISCV_SZPTR  "\n"
> -		REG_S"  ra, (sp)		\n"
> -		"addi	sp, sp, -"RISCV_SZPTR  "\n"
> -		REG_S"  s0, (sp)		\n"
> -		"addi	s0, sp, 2*"RISCV_SZPTR "\n"
> -		"move	sp, %[sp]		\n"
> -		"move	a0, %[regs]		\n"
> -		"call	handle_riscv_irq	\n"
> -		"addi	sp, s0, -2*"RISCV_SZPTR"\n"
> -		REG_L"  s0, (sp)		\n"
> -		"addi	sp, sp, "RISCV_SZPTR   "\n"
> -		REG_L"  ra, (sp)		\n"
> -		"addi	sp, sp, "RISCV_SZPTR   "\n"
> -		:
> -		: [sp] "r" (sp), [regs] "r" (regs)
> -		: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
> -		  "t0", "t1", "t2", "t3", "t4", "t5", "t6",
> -		  "memory");
> -	} else
> -#endif
> +
> +	if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
> +		call_on_irq_stack(regs, handle_riscv_irq);
> +	else
>  		handle_riscv_irq(regs);
>  
>  	irqentry_exit(regs, state);
Sami Tolvanen Aug. 24, 2023, 7:03 p.m. UTC | #2
Hi Clément,

On Thu, Aug 24, 2023 at 1:12 AM Clément Léger <cleger@rivosinc.com> wrote:
>
> Some defines for stack frame offsets could be added in asm-offsets for
> the offsets:
>
>         DEFINE(STACKFRAME_SIZE_ON_STACK, ALIGN(sizeof(struct stackframe),
> STACK_ALIGN));
>         OFFSET(STACKFRAME_FP, stackframe, fp);
>         OFFSET(STACKFRAME_RA, stackframe, ra);
>
> And you can probably increment the stack at once (saving two addi in
> prologue/epilogue) for both RA and FP and reuse the asm-offsets defines:
>
>         addi    sp, sp, -STACKFRAME_SIZE_ON_STACK
>         REG_S   ra, STACKFRAME_RA(sp)
>         REG_S   s0, STACKFRAME_FP(sp)
>         addi    s0, sp, STACKFRAME_SIZE_ON_STACK

Thanks for taking a look!

I just copied the existing inline assembly here, but I do agree that
defining stack frame offsets and avoiding the extra addis is a cleaner
approach. I'll change this in v3.

Sami
diff mbox series

Patch

diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index bfb4c26f113c..8e446be2d57c 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -104,6 +104,11 @@ 
 .endm
 #endif /* CONFIG_SMP */
 
+.macro load_per_cpu dst ptr tmp
+	asm_per_cpu \dst \ptr \tmp
+	REG_L \dst, 0(\dst)
+.endm
+
 	/* save all GPs except x1 ~ x5 */
 	.macro save_from_x6_to_x31
 	REG_S x6,  PT_T1(sp)
diff --git a/arch/riscv/include/asm/irq_stack.h b/arch/riscv/include/asm/irq_stack.h
index e4042d297580..6441ded3b0cf 100644
--- a/arch/riscv/include/asm/irq_stack.h
+++ b/arch/riscv/include/asm/irq_stack.h
@@ -12,6 +12,9 @@ 
 
 DECLARE_PER_CPU(ulong *, irq_stack_ptr);
 
+asmlinkage void call_on_irq_stack(struct pt_regs *regs,
+				  void (*func)(struct pt_regs *));
+
 #ifdef CONFIG_VMAP_STACK
 /*
  * To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 3d11aa3af105..39875f5e08a6 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -218,6 +218,38 @@  SYM_CODE_START(ret_from_fork)
 	tail syscall_exit_to_user_mode
 SYM_CODE_END(ret_from_fork)
 
+#ifdef CONFIG_IRQ_STACKS
+/*
+ * void call_on_irq_stack(struct pt_regs *regs,
+ * 		          void (*func)(struct pt_regs *));
+ *
+ * Calls func(regs) using the per-CPU IRQ stack.
+ */
+SYM_FUNC_START(call_on_irq_stack)
+	/* Create a frame record to save ra and s0 (fp) */
+	addi	sp, sp, -RISCV_SZPTR
+	REG_S	ra, (sp)
+	addi	sp, sp, -RISCV_SZPTR
+	REG_S	s0, (sp)
+	addi	s0, sp, 2*RISCV_SZPTR
+
+	/* Switch to the per-CPU IRQ stack and call the handler */
+	load_per_cpu t0, irq_stack_ptr, t1
+	li	t1, IRQ_STACK_SIZE
+	add	sp, t0, t1
+	jalr	a1
+
+	/* Switch back to the thread stack and restore ra and s0 */
+	addi	sp, s0, -2*RISCV_SZPTR
+	REG_L	s0, (sp)
+	addi	sp, sp, RISCV_SZPTR
+	REG_L	ra, (sp)
+	addi	sp, sp, RISCV_SZPTR
+
+	ret
+SYM_FUNC_END(call_on_irq_stack)
+#endif /* CONFIG_IRQ_STACKS */
+
 /*
  * Integer register context switch
  * The callee-saved registers must be saved and restored.
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index d0577cc6a081..95dafdcbd135 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -61,32 +61,16 @@  static void init_irq_stacks(void)
 #endif /* CONFIG_VMAP_STACK */
 
 #ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
+static void ___do_softirq(struct pt_regs *regs)
+{
+	__do_softirq();
+}
+
 void do_softirq_own_stack(void)
 {
-#ifdef CONFIG_IRQ_STACKS
-	if (on_thread_stack()) {
-		ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
-					+ IRQ_STACK_SIZE/sizeof(ulong);
-		__asm__ __volatile(
-		"addi	sp, sp, -"RISCV_SZPTR  "\n"
-		REG_S"  ra, (sp)		\n"
-		"addi	sp, sp, -"RISCV_SZPTR  "\n"
-		REG_S"  s0, (sp)		\n"
-		"addi	s0, sp, 2*"RISCV_SZPTR "\n"
-		"move	sp, %[sp]		\n"
-		"call	__do_softirq		\n"
-		"addi	sp, s0, -2*"RISCV_SZPTR"\n"
-		REG_L"  s0, (sp)		\n"
-		"addi	sp, sp, "RISCV_SZPTR   "\n"
-		REG_L"  ra, (sp)		\n"
-		"addi	sp, sp, "RISCV_SZPTR   "\n"
-		:
-		: [sp] "r" (sp)
-		: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
-		  "t0", "t1", "t2", "t3", "t4", "t5", "t6",
-		  "memory");
-	} else
-#endif
+	if (on_thread_stack())
+		call_on_irq_stack(NULL, ___do_softirq);
+	else
 		__do_softirq();
 }
 #endif /* CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK */
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index deb2144d9143..83319b6816da 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -350,31 +350,10 @@  static void noinstr handle_riscv_irq(struct pt_regs *regs)
 asmlinkage void noinstr do_irq(struct pt_regs *regs)
 {
 	irqentry_state_t state = irqentry_enter(regs);
-#ifdef CONFIG_IRQ_STACKS
-	if (on_thread_stack()) {
-		ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
-					+ IRQ_STACK_SIZE/sizeof(ulong);
-		__asm__ __volatile(
-		"addi	sp, sp, -"RISCV_SZPTR  "\n"
-		REG_S"  ra, (sp)		\n"
-		"addi	sp, sp, -"RISCV_SZPTR  "\n"
-		REG_S"  s0, (sp)		\n"
-		"addi	s0, sp, 2*"RISCV_SZPTR "\n"
-		"move	sp, %[sp]		\n"
-		"move	a0, %[regs]		\n"
-		"call	handle_riscv_irq	\n"
-		"addi	sp, s0, -2*"RISCV_SZPTR"\n"
-		REG_L"  s0, (sp)		\n"
-		"addi	sp, sp, "RISCV_SZPTR   "\n"
-		REG_L"  ra, (sp)		\n"
-		"addi	sp, sp, "RISCV_SZPTR   "\n"
-		:
-		: [sp] "r" (sp), [regs] "r" (regs)
-		: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
-		  "t0", "t1", "t2", "t3", "t4", "t5", "t6",
-		  "memory");
-	} else
-#endif
+
+	if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
+		call_on_irq_stack(regs, handle_riscv_irq);
+	else
 		handle_riscv_irq(regs);
 
 	irqentry_exit(regs, state);