arm64: fpsimd: Bring cond_yield asm macro in line with new rules

Message ID 20240111112447.577640-2-ardb+git@google.com (mailing list archive)
State New, archived
Series arm64: fpsimd: Bring cond_yield asm macro in line with new rules

Commit Message

Ard Biesheuvel Jan. 11, 2024, 11:24 a.m. UTC
From: Ard Biesheuvel <ardb@kernel.org>

We no longer disable softirqs or preemption when doing kernel mode SIMD,
and so for fully preemptible kernels, there is no longer a need to do any
explicit yielding (and for non-preemptible kernels, yielding is not
needed either).

That leaves voluntary preemption, where only explicit yield calls may
result in a reschedule. To retain the existing behavior for such a
configuration, the macro must take the new situation into account: the
preempt count will now be zero rather than one, and yielding to pending
softirqs is no longer necessary.

Fixes: aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode NEON at context switch")
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/assembler.h | 25 +++++++++----------------
 arch/arm64/kernel/asm-offsets.c    |  2 --
 2 files changed, 9 insertions(+), 18 deletions(-)
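
[ For context, not part of the patch: the "preempt count will be zero"
  reasoning relies on arm64's encoding of thread_info::preempt_count,
  sketched below in simplified form (little-endian layout; see
  arch/arm64/include/asm/thread_info.h and asm/preempt.h upstream). ]

	union {
		u64	preempt_count;	/* read with a single 64-bit load */
		struct {
			u32	count;		/* the actual preempt count */
			u32	need_resched;	/* 0 when TIF_NEED_RESCHED is set */
		} preempt;
	};

	/*
	 * Because need_resched is stored inverted in the top word, one
	 * 64-bit load of preempt_count reads as zero exactly when the
	 * count is zero _and_ a reschedule is pending -- which is why a
	 * single cbz on the loaded value suffices in the new macro.
	 */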

Comments

Mark Brown Jan. 11, 2024, 5:38 p.m. UTC | #1
On Thu, Jan 11, 2024 at 12:24:48PM +0100, Ard Biesheuvel wrote:
> From: Ard Biesheuvel <ardb@kernel.org>
> 
> We no longer disable softirqs or preemption when doing kernel mode SIMD,
> and so for fully preemptible kernels, there is no longer a need to do any
> explicit yielding (and for non-preemptible kernels, yielding is not
> needed either).

Reviewed-by: Mark Brown <broonie@kernel.org>
Will Deacon Jan. 12, 2024, 1:42 p.m. UTC | #2
On Thu, 11 Jan 2024 12:24:48 +0100, Ard Biesheuvel wrote:
> From: Ard Biesheuvel <ardb@kernel.org>
> 
> We no longer disable softirqs or preemption when doing kernel mode SIMD,
> and so for fully preemptible kernels, there is no longer a need to do any
> explicit yielding (and for non-preemptible kernels, yielding is not
> needed either).
> 
> [...]

Applied to arm64 (for-next/core), thanks!

[1/1] arm64: fpsimd: Bring cond_yield asm macro in line with new rules
      https://git.kernel.org/arm64/c/3931261ecf46

Cheers,
Will

Patch

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 7b1975bf4b90..513787e43329 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -760,32 +760,25 @@ alternative_endif
 .endm
 
 	/*
-	 * Check whether preempt/bh-disabled asm code should yield as soon as
-	 * it is able. This is the case if we are currently running in task
-	 * context, and either a softirq is pending, or the TIF_NEED_RESCHED
-	 * flag is set and re-enabling preemption a single time would result in
-	 * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
-	 * stored negated in the top word of the thread_info::preempt_count
+	 * Check whether asm code should yield as soon as it is able. This is
+	 * the case if we are currently running in task context, and the
+	 * TIF_NEED_RESCHED flag is set. (Note that the TIF_NEED_RESCHED flag
+	 * is stored negated in the top word of the thread_info::preempt_count
 	 * field)
 	 */
-	.macro		cond_yield, lbl:req, tmp:req, tmp2:req
+	.macro		cond_yield, lbl:req, tmp:req, tmp2
+#ifdef CONFIG_PREEMPT_VOLUNTARY
 	get_current_task \tmp
 	ldr		\tmp, [\tmp, #TSK_TI_PREEMPT]
 	/*
 	 * If we are serving a softirq, there is no point in yielding: the
 	 * softirq will not be preempted no matter what we do, so we should
-	 * run to completion as quickly as we can.
+	 * run to completion as quickly as we can. The preempt_count field will
+	 * have BIT(SOFTIRQ_SHIFT) set in this case, so the zero check will
+	 * catch this case too.
 	 */
-	tbnz		\tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
-#ifdef CONFIG_PREEMPTION
-	sub		\tmp, \tmp, #PREEMPT_DISABLE_OFFSET
 	cbz		\tmp, \lbl
 #endif
-	adr_l		\tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
-	get_this_cpu_offset	\tmp2
-	ldr		w\tmp, [\tmp, \tmp2]
-	cbnz		w\tmp, \lbl	// yield on pending softirq in task context
-.Lnoyield_\@:
 	.endm
 
 /*
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 5ff1942b04fc..5a7dbbe0ce63 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -117,8 +117,6 @@ int main(void)
   DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
   BLANK();
   DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
-  DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
-  DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
   BLANK();
   DEFINE(CPU_BOOT_TASK,		offsetof(struct secondary_data, task));
   BLANK();
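
[ Illustration, not part of the patch: a typical cond_yield user in the
  arm64 crypto code structures its block loop roughly as below. The
  labels, registers and surrounding logic are simplified here; see e.g.
  arch/arm64/crypto/sha1-ce-core.S for a real caller. ]

0:	/* ... process one block of input with SIMD ... */
	subs	w2, w2, #1		// any blocks left to process?
	b.eq	3f			// no: fall through to the exit path
	cond_yield	3f, x8, x9	// should we give the CPU back?
	b	0b			// no: keep going
3:	/* ... store partial state ... */
	mov	w0, w2			// return number of unprocessed blocks
	ret				// C caller may yield and re-invoke us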