arm64: kconfig: Ensure spinlock fastpaths are inlined if !PREEMPT
When running with CONFIG_PREEMPT=n, the spinlock fastpaths fit inside 64 bytes, which typically coincides with the L1 I-cache line size. Inline the spinlock fastpaths, like we do already for rwlocks.

Signed-off-by: Will Deacon <will.deacon@arm.com>
commit 5d168964ae
parent c11090474d
arch/arm64/Kconfig
@@ -42,6 +42,16 @@ config ARM64
 	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
+	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
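For context on what these selects do: roughly speaking, the generic locking code (kernel/Kconfig.locks together with include/linux/spinlock_api_smp.h) turns each ARCH_INLINE_SPIN_* select into a CONFIG_INLINE_SPIN_* option that replaces the out-of-line _raw_spin_*() call with the inline fastpath at the call site. The standalone C sketch below only illustrates that compile-time pattern; all names in it (DEMO_INLINE_LOCK, demo_lock, fastpath_lock) are hypothetical and not kernel APIs.

/*
 * Illustrative sketch only -- not kernel code. With the option enabled,
 * the lock call compiles to the inline fastpath at the call site; with it
 * disabled, every lock goes through an extra out-of-line function call.
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_spinlock {
	atomic_flag locked;
};

/* The small fastpath body: spin until the flag is acquired. */
static inline void fastpath_lock(struct demo_spinlock *lock)
{
	while (atomic_flag_test_and_set_explicit(&lock->locked,
						  memory_order_acquire))
		; /* spin */
}

#ifdef DEMO_INLINE_LOCK
/* "Inlined" configuration: the call site gets the fastpath directly. */
#define demo_lock(lock) fastpath_lock(lock)
#else
/* "Out-of-line" configuration: an extra call on every lock operation. */
static void demo_lock(struct demo_spinlock *lock)
{
	fastpath_lock(lock);
}
#endif

static void demo_unlock(struct demo_spinlock *lock)
{
	atomic_flag_clear_explicit(&lock->locked, memory_order_release);
}

int main(void)
{
	struct demo_spinlock lock = { ATOMIC_FLAG_INIT };

	demo_lock(&lock);	/* fastpath inlined here if DEMO_INLINE_LOCK */
	puts("locked");
	demo_unlock(&lock);
	return 0;
}

Building with -DDEMO_INLINE_LOCK versus without plays the role of the Kconfig select above: same fastpath code either way, but only one configuration pays a call/return on every lock operation.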