arm64: kconfig: Ensure spinlock fastpaths are inlined if !PREEMPT

When running with CONFIG_PREEMPT=n, the spinlock fastpaths fit inside
64 bytes, which typically coincides with the L1 I-cache line size.

Inline the spinlock fastpaths, like we do already for rwlocks.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Author: Will Deacon
Date: 2018-03-13 21:17:01 +00:00
Commit: 5d168964ae (parent c11090474d)
1 changed file with 10 additions and 0 deletions

@@ -42,6 +42,16 @@ config ARM64
 	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
+	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
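
For context, these selects take effect through the generic locking plumbing rather than anything arm64-specific: kernel/Kconfig.locks turns each ARCH_INLINE_SPIN_* select into a corresponding INLINE_SPIN_* symbol, and include/linux/spinlock_api_smp.h then maps the _raw_spin_*() entry points onto the inline __raw_spin_*() helpers instead of the out-of-line versions. A simplified, paraphrased sketch of that plumbing (exact wording and dependencies differ between kernel versions):

    # kernel/Kconfig.locks (simplified, paraphrased)
    config INLINE_SPIN_LOCK
            def_bool y
            depends on ARCH_INLINE_SPIN_LOCK

    /* include/linux/spinlock_api_smp.h (paraphrased) */
    #ifdef CONFIG_INLINE_SPIN_LOCK
    #define _raw_spin_lock(lock) __raw_spin_lock(lock)
    #endif

With CONFIG_PREEMPT=n on arm64, each of the ten selects above therefore turns the matching spin_lock()/spin_unlock() variant into an inlined fastpath at the call site.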