s390/spinlock: cleanup spinlock code
Improve the spinlock code in several aspects:
 - Have _raw_compare_and_swap return true if the operation has been
   successful instead of returning the old value.
 - Remove the "volatile" from arch_spinlock_t and arch_rwlock_t
 - Rename 'owner_cpu' to 'lock'
 - Add helper functions arch_spin_trylock_once / arch_spin_tryrelease_once

[ Martin Schwidefsky: patch breakdown and code beautification ]

Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 50be634507
commit 5b3f683e69
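Most of the diff below falls out of the first bullet: once _raw_compare_and_swap reports success directly, the "== old" / "!= old" comparisons at every call site collapse into plain truth tests. A minimal user-space sketch of the two calling styles, with GCC's __sync builtins standing in for the s390 CS instruction (cas_old and cas_bool are illustrative names, not kernel APIs):

#include <stdio.h>

/* Old style: return the previous value, the caller compares. */
static unsigned int cas_old(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __sync_val_compare_and_swap(lock, old, new);
}

/* New style: return non-zero iff the swap actually happened. */
static int cas_bool(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __sync_bool_compare_and_swap(lock, old, new);
}

int main(void)
{
	unsigned int lock = 0;

	if (cas_old(&lock, 0, 42) == 0)		/* caller must repeat 'old' */
		printf("old style: acquired, lock=%u\n", lock);

	lock = 0;
	if (cas_bool(&lock, 0, 42))		/* reads as a plain predicate */
		printf("new style: acquired, lock=%u\n", lock);
	return 0;
}

The boolean form also removes a subtle trap: callers of the old form had to keep the expected value around just to be able to test the result at all.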
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -14,15 +14,16 @@
 extern int spin_retry;
 
 static inline int
-_raw_compare_and_swap(volatile unsigned int *lock,
-		      unsigned int old, unsigned int new)
+_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 {
+	unsigned int old_expected = old;
+
 	asm volatile(
 		"	cs	%0,%3,%1"
 		: "=d" (old), "=Q" (*lock)
 		: "0" (old), "d" (new), "Q" (*lock)
 		: "cc", "memory" );
-	return old;
+	return old == old_expected;
 }
 
 /*
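For readers without the s390 instruction reference at hand: CS compares its first operand (%0, `old`) with the lock word (%1); on match it stores %3 (`new`), otherwise it loads the current lock word back into %0. Saving `old_expected` up front therefore makes `old == old_expected` exactly the success flag. A rough portable equivalent, assuming the GCC/Clang __atomic builtins rather than anything in this patch:

#include <stdbool.h>
#include <stdio.h>

/* Same contract as the patched _raw_compare_and_swap: true on success.
 * __atomic_compare_exchange_n also writes the observed value back into
 * 'old' on failure, mirroring what CS does with its first operand. */
static bool raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __atomic_compare_exchange_n(lock, &old, new, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	unsigned int lock = 0;

	printf("first try:  %d\n", raw_compare_and_swap(&lock, 0, 7)); /* 1 */
	printf("second try: %d\n", raw_compare_and_swap(&lock, 0, 7)); /* 0 */
	return 0;
}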
@@ -34,55 +35,64 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) \
-		arch_spin_relax(lock); } while (0)
-
-extern void arch_spin_lock_wait(arch_spinlock_t *);
-extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-extern int arch_spin_trylock_retry(arch_spinlock_t *);
-extern void arch_spin_relax(arch_spinlock_t *lock);
+void arch_spin_lock_wait(arch_spinlock_t *);
+int arch_spin_trylock_retry(arch_spinlock_t *);
+void arch_spin_relax(arch_spinlock_t *);
+void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
-	return lock.owner_cpu == 0;
+	return lock.lock == 0;
 }
 
+static inline int arch_spin_is_locked(arch_spinlock_t *lp)
+{
+	return ACCESS_ONCE(lp->lock) != 0;
+}
+
+static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
+{
+	unsigned int new = ~smp_processor_id();
+
+	return _raw_compare_and_swap(&lp->lock, 0, new);
+}
+
+static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
+{
+	unsigned int old = ~smp_processor_id();
+
+	return _raw_compare_and_swap(&lp->lock, old, 0);
+}
+
 static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
-	int old;
-
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return;
-	arch_spin_lock_wait(lp);
+	if (unlikely(!arch_spin_trylock_once(lp)))
+		arch_spin_lock_wait(lp);
 }
 
 static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
 					unsigned long flags)
 {
-	int old;
-
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return;
-	arch_spin_lock_wait_flags(lp, flags);
+	if (unlikely(!arch_spin_trylock_once(lp)))
+		arch_spin_lock_wait_flags(lp, flags);
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lp)
 {
-	int old;
-
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return 1;
-	return arch_spin_trylock_retry(lp);
+	if (unlikely(!arch_spin_trylock_once(lp)))
+		return arch_spin_trylock_retry(lp);
+	return 1;
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
-	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
+	arch_spin_tryrelease_once(lp);
 }
 
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	while (arch_spin_is_locked(lock))
+		arch_spin_relax(lock);
+}
+
 /*
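A detail worth spelling out: the lock word stores the owner as ~cpu rather than cpu, so CPU 0 can never collide with the unlocked value 0, and waiters recover the owner with another ~. A small user-space sketch of the two new once-helpers under that encoding (a fixed this_cpu stands in for smp_processor_id(), __sync builtins for CS):

#include <stdio.h>

typedef struct { unsigned int lock; } arch_spinlock_t;

static unsigned int this_cpu = 0;	/* stand-in for smp_processor_id() */

static int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	/* unlocked (0) -> ~cpu: succeeds only if nobody holds the lock */
	return __sync_bool_compare_and_swap(&lp->lock, 0, ~this_cpu);
}

static int arch_spin_tryrelease_once(arch_spinlock_t *lp)
{
	/* ~cpu -> 0: succeeds only if *we* hold the lock */
	return __sync_bool_compare_and_swap(&lp->lock, ~this_cpu, 0);
}

int main(void)
{
	arch_spinlock_t lp = { 0 };

	printf("lock:   %d\n", arch_spin_trylock_once(&lp));	/* 1 */
	printf("owner:  cpu %u\n", ~lp.lock);			/* 0 */
	printf("relock: %d\n", arch_spin_trylock_once(&lp));	/* 0 */
	printf("unlock: %d\n", arch_spin_tryrelease_once(&lp));	/* 1 */
	return 0;
}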
@@ -119,7 +129,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
-	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
 		_raw_read_lock_wait(rw);
 }
 
@@ -127,30 +137,28 @@ static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
-	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+	if (!_raw_compare_and_swap(&rw->lock, old, old + 1))
 		_raw_read_lock_wait_flags(rw, flags);
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	unsigned int old, cmp;
+	unsigned int old;
 
-	old = rw->lock;
 	do {
-		cmp = old;
-		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
-	} while (cmp != old);
+		old = ACCESS_ONCE(rw->lock);
+	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
 }
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
 		_raw_write_lock_wait(rw);
 }
 
 static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+	if (unlikely(!_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
 		_raw_write_lock_wait_flags(rw, flags);
 }
 
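The rwlock word packs a writer flag into bit 31 and the reader count into the low 31 bits. Masking with 0x7fffffffU makes the reader fast path self-checking: if a writer holds the lock, the masked expected value can never equal the real word, so the CAS fails and the slow path runs. A compact user-space sketch of that layout (again __sync builtins, not the kernel source):

#include <stdio.h>

typedef struct { unsigned int lock; } arch_rwlock_t;

#define WRITER_BIT 0x80000000U

static int read_trylock(arch_rwlock_t *rw)
{
	/* expected value has the writer bit cleared: fails under a writer */
	unsigned int old = rw->lock & 0x7fffffffU;

	return __sync_bool_compare_and_swap(&rw->lock, old, old + 1);
}

static int write_trylock(arch_rwlock_t *rw)
{
	/* writers need the whole word to be 0: no readers, no writer */
	return __sync_bool_compare_and_swap(&rw->lock, 0, WRITER_BIT);
}

int main(void)
{
	arch_rwlock_t rw = { 0 };

	printf("reader 1: %d\n", read_trylock(&rw));	/* 1, count = 1 */
	printf("reader 2: %d\n", read_trylock(&rw));	/* 1, count = 2 */
	printf("writer:   %d\n", write_trylock(&rw));	/* 0, readers active */
	return 0;
}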
@@ -163,14 +171,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
-	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
+	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1)))
 		return 1;
 	return _raw_read_trylock_retry(rw);
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
+	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000)))
 		return 1;
 	return _raw_write_trylock_retry(rw);
 }
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -6,13 +6,13 @@
 #endif
 
 typedef struct {
-	volatile unsigned int owner_cpu;
+	unsigned int lock;
 } __attribute__ ((aligned (4))) arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
 
 typedef struct {
-	volatile unsigned int lock;
+	unsigned int lock;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
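Dropping volatile from the lock types is safe because every access that needs freshness now goes through the inline assembly or through ACCESS_ONCE, which applies volatility per access instead of pessimizing every access to the field. In kernels of this era the macro was essentially:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

so arch_spin_is_locked() above still forces a real load on each poll, while ordinary uses of the struct compile to plain memory operations.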
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -31,22 +31,21 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 	unsigned int owner;
 
 	while (1) {
-		owner = lp->owner_cpu;
+		owner = lp->lock;
 		if (!owner || smp_vcpu_scheduled(~owner)) {
 			for (count = spin_retry; count > 0; count--) {
 				if (arch_spin_is_locked(lp))
 					continue;
-				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
-							  cpu) == 0)
+				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 					return;
 			}
 			if (MACHINE_IS_LPAR)
 				continue;
 		}
-		owner = lp->owner_cpu;
+		owner = lp->lock;
 		if (owner)
 			smp_yield_cpu(~owner);
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 			return;
 	}
 }
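The slow path only burns cycles while the owner's virtual CPU is actually dispatched (smp_vcpu_scheduled); otherwise it directs a yield at the owner. A stripped-down sketch of that loop shape, with hypothetical vcpu_running()/yield_to() stubs so it compiles in user space:

#include <sched.h>
#include <stdio.h>

typedef struct { unsigned int lock; } arch_spinlock_t;

/* Hypothetical stand-ins: on s390 these ask the hypervisor whether the
 * owner's virtual CPU is dispatched, and donate our timeslice to it. */
static int vcpu_running(unsigned int cpu)	{ (void)cpu; return 1; }
static void yield_to(unsigned int cpu)		{ (void)cpu; sched_yield(); }

static void spin_lock_wait(arch_spinlock_t *lp, unsigned int cpu, int spin_retry)
{
	unsigned int owner;
	int count;

	for (;;) {
		owner = lp->lock;
		if (!owner || vcpu_running(~owner)) {
			/* owner is (probably) making progress: spin a while */
			for (count = spin_retry; count > 0; count--) {
				if (lp->lock)
					continue;
				if (__sync_bool_compare_and_swap(&lp->lock, 0, cpu))
					return;
			}
		}
		owner = lp->lock;
		if (owner)
			yield_to(~owner);	/* let the owner run instead */
		if (__sync_bool_compare_and_swap(&lp->lock, 0, cpu))
			return;
	}
}

int main(void)
{
	arch_spinlock_t lp = { 0 };

	spin_lock_wait(&lp, ~0u, 1000);	/* uncontended: returns immediately */
	printf("acquired, lock=%#x\n", lp.lock);
	return 0;
}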
@@ -60,49 +59,33 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 
 	local_irq_restore(flags);
 	while (1) {
-		owner = lp->owner_cpu;
+		owner = lp->lock;
 		if (!owner || smp_vcpu_scheduled(~owner)) {
 			for (count = spin_retry; count > 0; count--) {
 				if (arch_spin_is_locked(lp))
 					continue;
 				local_irq_disable();
-				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
-							  cpu) == 0)
+				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 					return;
 				local_irq_restore(flags);
 			}
 			if (MACHINE_IS_LPAR)
 				continue;
 		}
-		owner = lp->owner_cpu;
+		owner = lp->lock;
 		if (owner)
 			smp_yield_cpu(~owner);
 		local_irq_disable();
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 			return;
 		local_irq_restore(flags);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-int arch_spin_trylock_retry(arch_spinlock_t *lp)
+void arch_spin_relax(arch_spinlock_t *lp)
 {
-	unsigned int cpu = ~smp_processor_id();
-	int count;
-
-	for (count = spin_retry; count > 0; count--) {
-		if (arch_spin_is_locked(lp))
-			continue;
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
-			return 1;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(arch_spin_trylock_retry);
-
-void arch_spin_relax(arch_spinlock_t *lock)
-{
-	unsigned int cpu = lock->owner_cpu;
+	unsigned int cpu = lp->lock;
 	if (cpu != 0) {
 		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
 		    !smp_vcpu_scheduled(~cpu))
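The _flags variant adds one constraint to the same loop: interrupts stay enabled while spinning and are closed only across the acquisition attempt, so an irqsave lock is never awaited with interrupts off. A reduced sketch of just that choreography (local_irq_* are no-op stand-ins here, and the owner/yield logic is omitted):

#include <stdio.h>

typedef struct { unsigned int lock; } arch_spinlock_t;

/* Hypothetical stand-ins for the kernel's interrupt control. */
static void local_irq_disable(void)		{ }
static void local_irq_restore(unsigned long f)	{ (void)f; }

/* Interrupts stay open while we spin; they are closed only across the
 * CAS attempt, so pending interrupts are never delayed by the wait. */
static void spin_lock_wait_flags(arch_spinlock_t *lp, unsigned int cpu,
				 unsigned long flags, int spin_retry)
{
	int count;

	local_irq_restore(flags);
	for (;;) {
		for (count = spin_retry; count > 0; count--) {
			if (lp->lock)
				continue;
			local_irq_disable();
			if (__sync_bool_compare_and_swap(&lp->lock, 0, cpu))
				return;		/* returns with irqs disabled */
			local_irq_restore(flags);
		}
	}
}

int main(void)
{
	arch_spinlock_t lp = { 0 };

	spin_lock_wait_flags(&lp, ~0u, 0, 1000);
	printf("acquired with irqs disabled, lock=%#x\n", lp.lock);
	return 0;
}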
@@ -111,6 +94,20 @@ void arch_spin_relax(arch_spinlock_t *lock)
 }
 EXPORT_SYMBOL(arch_spin_relax);
 
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
+{
+	int count;
+
+	for (count = spin_retry; count > 0; count--) {
+		if (arch_spin_is_locked(lp))
+			continue;
+		if (arch_spin_trylock_once(lp))
+			return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(arch_spin_trylock_retry);
+
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
 	unsigned int old;
@@ -124,7 +121,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return;
 	}
 }
@@ -145,7 +142,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		local_irq_disable();
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return;
 	}
 }
@@ -160,7 +157,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
 		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return 1;
 	}
 	return 0;
@@ -178,7 +175,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 		}
 		if (!arch_write_can_lock(rw))
 			continue;
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
 			return;
 	}
 }
@@ -197,7 +194,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 		if (!arch_write_can_lock(rw))
 			continue;
 		local_irq_disable();
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
 			return;
 	}
 }
@@ -210,7 +207,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
 	while (count-- > 0) {
 		if (!arch_write_can_lock(rw))
 			continue;
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
 			return 1;
 	}
 	return 0;
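To see the boolean-CAS locking discipline end to end, here is a quick pthread smoke test of the user-space approximation (build with gcc -pthread; this exercises the sketches above, not the kernel code):

#include <pthread.h>
#include <stdio.h>

static unsigned int lock;	/* 0 = unlocked */
static long counter;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		while (!__sync_bool_compare_and_swap(&lock, 0, 1))
			;			/* spin until we win the CAS */
		counter++;			/* protected region */
		__sync_bool_compare_and_swap(&lock, 1, 0);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("counter = %ld (expect 400000)\n", counter);
	return 0;
}

The __sync builtins imply full barriers, so the counter increments stay inside the critical section, which is the same ordering guarantee the "memory" clobber provides in the kernel's inline assembly.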