powerpc/spinlocks: Refactor SHARED_PROCESSOR
Determining whether a processor is in shared processor mode is not a compile-time constant, so don't hide the check behind a #define. Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf> Reviewed-by: Andrew Donnellan <ajd@linux.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/20190813031314.1828-2-cmr@informatik.wtf
This commit is contained in:
parent
d7fb5b18a5
commit
d57b78353a
|
@ -101,15 +101,27 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
|
|||
|
||||
/*
 * Hypervisor yield helpers. On shared-processor LPARs (CONFIG_PPC_SPLPAR)
 * a vCPU spinning on a lock held by a preempted vCPU should confer its
 * cycles to the hypervisor via __spin_yield()/__rw_yield(). On dedicated
 * or non-pseries configurations these degrade to plain compiler barriers
 * and SHARED_PROCESSOR is constant-false.
 */
#if defined(CONFIG_PPC_SPLPAR)
|
||||
/* We only yield to the hypervisor if we are in shared processor mode */
|
||||
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
|
||||
extern void __spin_yield(arch_spinlock_t *lock);
|
||||
extern void __rw_yield(arch_rwlock_t *lock);
|
||||
/* Not a shared-processor LPAR: yielding is pointless, make these no-ops. */
#else /* SPLPAR */
|
||||
#define __spin_yield(x) barrier()
|
||||
#define __rw_yield(x) barrier()
|
||||
#define SHARED_PROCESSOR 0
|
||||
#endif
|
||||
|
||||
/*
 * is_shared_processor() - report whether this CPU runs in shared
 * processor mode (i.e. the hypervisor may preempt our vCPUs).
 *
 * Returns true only on pseries shared-processor LPARs; callers use it
 * to decide whether spinning should yield to the hypervisor.
 *
 * The lppaca only exists on pseries, so everything touching it is
 * guarded by CONFIG_PPC_PSERIES to keep other platforms (which also
 * include this common header) compiling.
 */
static inline bool is_shared_processor(void)
{
#ifdef CONFIG_PPC_PSERIES
	if (!IS_ENABLED(CONFIG_PPC_SPLPAR))
		return false;

	return lppaca_shared_proc(local_paca->lppaca_ptr);
#else
	return false;
#endif
}
|
||||
|
||||
static inline void arch_spin_lock(arch_spinlock_t *lock)
|
||||
{
|
||||
while (1) {
|
||||
|
@ -117,7 +129,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
|
|||
break;
|
||||
do {
|
||||
HMT_low();
|
||||
if (SHARED_PROCESSOR)
|
||||
if (is_shared_processor())
|
||||
__spin_yield(lock);
|
||||
} while (unlikely(lock->slock != 0));
|
||||
HMT_medium();
|
||||
|
@ -136,7 +148,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
|
|||
local_irq_restore(flags);
|
||||
do {
|
||||
HMT_low();
|
||||
if (SHARED_PROCESSOR)
|
||||
if (is_shared_processor())
|
||||
__spin_yield(lock);
|
||||
} while (unlikely(lock->slock != 0));
|
||||
HMT_medium();
|
||||
|
@ -226,7 +238,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
|
|||
break;
|
||||
do {
|
||||
HMT_low();
|
||||
if (SHARED_PROCESSOR)
|
||||
if (is_shared_processor())
|
||||
__rw_yield(rw);
|
||||
} while (unlikely(rw->lock < 0));
|
||||
HMT_medium();
|
||||
|
@ -240,7 +252,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
|
|||
break;
|
||||
do {
|
||||
HMT_low();
|
||||
if (SHARED_PROCESSOR)
|
||||
if (is_shared_processor())
|
||||
__rw_yield(rw);
|
||||
} while (unlikely(rw->lock != 0));
|
||||
HMT_medium();
|
||||
|
|
Loading…
Reference in New Issue