From: Will Deacon
Date: Tue, 13 Mar 2018 21:17:01 +0000 (+0000)
Subject: arm64: kconfig: Ensure spinlock fastpaths are inlined if !PREEMPT
X-Git-Tag: v4.19~379^2~76
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=5d168964aece0b4a41269839c613683c5d7e0fb2;p=platform%2Fkernel%2Flinux-rpi.git

arm64: kconfig: Ensure spinlock fastpaths are inlined if !PREEMPT

When running with CONFIG_PREEMPT=n, the spinlock fastpaths fit inside
64 bytes, which typically coincides with the L1 I-cache line size.

Inline the spinlock fastpaths, like we do already for rwlocks.

Signed-off-by: Will Deacon
---

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index facd196..476de9b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -42,6 +42,16 @@ config ARM64
 	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
+	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
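
For reference, a simplified sketch of the plumbing behind these selects
(paraphrased from kernel/Kconfig.locks, include/linux/spinlock_api_smp.h
and kernel/locking/spinlock.c; exact guard conditions vary by kernel
version and some dependencies are elided here):

  # kernel/Kconfig.locks (abridged): the arch select flips on the
  # corresponding generic INLINE_ option
  config INLINE_SPIN_LOCK
  	def_bool y
  	depends on ARCH_INLINE_SPIN_LOCK

  /* include/linux/spinlock_api_smp.h (abridged): with the option set,
   * callers get the fastpath expanded inline at the call site */
  #ifdef CONFIG_INLINE_SPIN_LOCK
  #define _raw_spin_lock(lock) __raw_spin_lock(lock)
  #endif

  /* kernel/locking/spinlock.c (abridged): otherwise the fastpath stays
   * behind an out-of-line, exported helper */
  #ifndef CONFIG_INLINE_SPIN_LOCK
  void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
  {
  	__raw_spin_lock(lock);
  }
  EXPORT_SYMBOL(_raw_spin_lock);
  #endif

With PREEMPT disabled, each select above therefore lets spin_lock() and
friends expand to the arch fastpath at the call site rather than branching
to an out-of-line _raw_spin_* helper, which is what keeps the hot path
within a single 64-byte I-cache line.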