locking/kconfig: Simplify INLINE_SPIN_UNLOCK usage
Get rid of INLINE_SPIN_UNLOCK entirely, replacing it with
UNINLINE_SPIN_UNLOCK, which has the reverse meaning.

Whoever wants to change the default spinlock inlining behavior and
uninline the spinlocks for some weird reason, such as spinlock
debugging, paravirt etc., can now just select UNINLINE_SPIN_UNLOCK.

Original discussion at: https://lkml.org/lkml/2012/3/21/357

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: linux-mips@linux-mips.org
Link: http://lkml.kernel.org/r/20120322095502.30866.75756.sendpatchset@codeblue
[ tidied up the changelog a bit ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit e335e3eb82
parent 280fb016bf
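For illustration only, a minimal sketch of how the new symbol is meant to be used; CONFIG_EXAMPLE_LOCK_CHECKS is a made-up option, not something this commit adds. A feature that needs the spin unlock path built out of line no longer fiddles with the INLINE_SPIN_UNLOCK default, it simply selects UNINLINE_SPIN_UNLOCK, the same pattern the DEBUG_SPINLOCK hunk at the end of this diff uses:

config EXAMPLE_LOCK_CHECKS
	bool "Example option that needs out-of-line spin unlocks"
	depends on DEBUG_KERNEL
	select UNINLINE_SPIN_UNLOCK
	help
	  Hypothetical entry, shown only to illustrate the new symbol:
	  selecting UNINLINE_SPIN_UNLOCK forces _raw_spin_unlock() to be
	  built out of line instead of being inlined.

Preemptible kernels get the same effect automatically through the new "select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK" under config PREEMPT below.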
@@ -133,7 +133,7 @@ CONFIG_BLK_DEV_BSG=y
 CONFIG_IOSCHED_NOOP=y
 CONFIG_DEFAULT_NOOP=y
 CONFIG_DEFAULT_IOSCHED="noop"
-CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_UNINLINE_SPIN_UNLOCK is not set
 CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
 CONFIG_INLINE_READ_UNLOCK=y
 CONFIG_INLINE_READ_UNLOCK_IRQ=y
@@ -113,7 +113,7 @@ CONFIG_DEFAULT_IOSCHED="noop"
 # CONFIG_INLINE_SPIN_LOCK_BH is not set
 # CONFIG_INLINE_SPIN_LOCK_IRQ is not set
 # CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
-CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_UNINLINE_SPIN_UNLOCK is not set
 # CONFIG_INLINE_SPIN_UNLOCK_BH is not set
 CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
 # CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
@@ -67,7 +67,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 #define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
 #endif
 
-#ifdef CONFIG_INLINE_SPIN_UNLOCK
+#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
 #define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
 #endif
 
@@ -124,8 +124,8 @@ config INLINE_SPIN_LOCK_IRQSAVE
 	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
 		 ARCH_INLINE_SPIN_LOCK_IRQSAVE
 
-config INLINE_SPIN_UNLOCK
-	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK)
+config UNINLINE_SPIN_UNLOCK
+	bool
 
 config INLINE_SPIN_UNLOCK_BH
 	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
@@ -36,6 +36,7 @@ config PREEMPT_VOLUNTARY
 config PREEMPT
 	bool "Preemptible Kernel (Low-Latency Desktop)"
 	select PREEMPT_COUNT
+	select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
 	help
 	  This option reduces the latency of the kernel by making
 	  all kernel code (that is not executing in a critical section)
@@ -163,7 +163,7 @@ void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
 EXPORT_SYMBOL(_raw_spin_lock_bh);
 #endif
 
-#ifndef CONFIG_INLINE_SPIN_UNLOCK
+#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
 void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__raw_spin_unlock(lock);
@@ -499,6 +499,7 @@ config RT_MUTEX_TESTER
 config DEBUG_SPINLOCK
 	bool "Spinlock and rw-lock debugging: basic checks"
 	depends on DEBUG_KERNEL
+	select UNINLINE_SPIN_UNLOCK
 	help
 	  Say Y here and build SMP to catch missing spinlock initialization
 	  and certain other kinds of spinlock errors commonly made.  This is