When running with CONFIG_PREEMPT=n, the spinlock fastpaths fit inside
64 bytes, which typically coincides with the L1 I-cache line size.

Inline the spinlock fastpaths, like we do already for rwlocks.

Signed-off-by: Will Deacon <will.deacon@arm.com>
---
 arch/arm64/Kconfig | 10 ++++++++++
 1 file changed, 10 insertions(+)

@@ -42,6 +42,16 @@ config ARM64
 	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
+	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
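For context (this note is not part of the patch): the ARCH_INLINE_SPIN_*
symbols selected above are promoted to the corresponding CONFIG_INLINE_SPIN_*
options by kernel/Kconfig.locks, roughly on the condition that lock debugging
(DEBUG_SPINLOCK) is disabled. include/linux/spinlock_api_smp.h then resolves
the spinlock fastpath to an inline body rather than a call into
kernel/locking/spinlock.c. A condensed sketch of that dispatch, abridged from
the mainline headers of this era:

/*
 * Condensed from include/linux/spinlock_api_smp.h: when
 * CONFIG_INLINE_SPIN_LOCK is set (driven by ARCH_INLINE_SPIN_LOCK),
 * _raw_spin_lock() becomes this static inline instead of an
 * out-of-line call into kernel/locking/spinlock.c.
 */
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

With CONFIG_PREEMPT=n and lockdep disabled, preempt_disable() and
spin_acquire() compile down to (almost) nothing, leaving little more than the
architecture's lock operation itself, which is why the whole fastpath fits in
the 64 bytes mentioned in the changelog.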
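The flip side, for comparison: without these selects, every spin_lock() pays
for a call and return into kernel/locking/spinlock.c, where the out-of-line
copies live. Simplified from mainline:

/*
 * Simplified from kernel/locking/spinlock.c: the out-of-line copy that
 * is built, exported, and called on every spin_lock() only when the
 * architecture does not select ARCH_INLINE_SPIN_LOCK.
 */
#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

The trade-off is the usual one: inlining removes the call/return overhead on a
hot path at a small cost in text size, and arm64 already made the same call
for the rwlock paths (the ARCH_INLINE_WRITE_UNLOCK_* context lines in the hunk
above).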