s390/spinlock: use atomic primitives for spinlocks
Add a couple more __atomic_xxx functions to atomic_ops.h and use them to
replace the compare-and-swap inlines in the spinlock code. This changes
the type of the lock value from unsigned int to int.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
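For reference, a minimal user-space sketch (not from the patch) of the two cmpxchg flavours atomic_ops.h offers after this change, built on the same GCC __sync builtins the patch switches to; the helper names here only mirror the kernel ones:

/* illustration only: value-returning vs. boolean compare-and-swap */
#include <stdio.h>

static inline int cmpxchg_val(int *ptr, int old, int new)
{
	/* returns the value found at *ptr; the swap happened iff it equals old */
	return __sync_val_compare_and_swap(ptr, old, new);
}

static inline int cmpxchg_bool(int *ptr, int old, int new)
{
	/* returns non-zero iff the swap happened */
	return __sync_bool_compare_and_swap(ptr, old, new);
}

int main(void)
{
	int lock = 0;

	if (cmpxchg_bool(&lock, 0, 1))			/* take the "lock": 0 -> 1 */
		printf("acquired, lock=%d\n", lock);	/* lock = 1 */
	printf("old value: %d\n", cmpxchg_val(&lock, 1, 0));	/* prints 1, lock back to 0 */
	return 0;
}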
parent df26c2e87e
commit 02c503ff23
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -111,20 +111,22 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
 
 static inline int __atomic_cmpxchg(int *ptr, int old, int new)
 {
-	asm volatile(
-		"	cs	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+Q" (*ptr)
-		: [new] "d" (new) : "cc", "memory");
-	return old;
+	return __sync_val_compare_and_swap(ptr, old, new);
+}
+
+static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new)
+{
+	return __sync_bool_compare_and_swap(ptr, old, new);
 }
 
 static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
 {
-	asm volatile(
-		"	csg	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+Q" (*ptr)
-		: [new] "d" (new) : "cc", "memory");
-	return old;
+	return __sync_val_compare_and_swap(ptr, old, new);
+}
+
+static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+{
+	return __sync_bool_compare_and_swap(ptr, old, new);
 }
 
 #endif /* __ARCH_S390_ATOMIC_OPS__ */
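The boolean variant is what the spinlock code wants: a trylock only needs success or failure, not the old value. A hedged sketch of that calling pattern (names illustrative; compare with the arch_spin_trylock_once() hunk below):

/* illustration only: a trylock is one boolean cmpxchg of 0 -> lockval */
static inline int trylock_sketch(int *lock, int lockval)
{
	return __sync_bool_compare_and_swap(lock, 0, lockval);
}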
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -10,6 +10,7 @@
 #define __ASM_SPINLOCK_H
 
 #include <linux/smp.h>
+#include <asm/atomic_ops.h>
 #include <asm/barrier.h>
 #include <asm/processor.h>
 
@@ -17,12 +18,6 @@
 
 extern int spin_retry;
 
-static inline int
-_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
-{
-	return __sync_bool_compare_and_swap(lock, old, new);
-}
-
 #ifndef CONFIG_SMP
 static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
 #else
@@ -40,7 +35,7 @@ bool arch_vcpu_is_preempted(int cpu);
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-void arch_lock_relax(unsigned int cpu);
+void arch_lock_relax(int cpu);
 
 void arch_spin_lock_wait(arch_spinlock_t *);
 int arch_spin_trylock_retry(arch_spinlock_t *);
@@ -70,7 +65,7 @@ static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 {
 	barrier();
 	return likely(arch_spin_value_unlocked(*lp) &&
-		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
+		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *lp)
@@ -95,7 +90,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
-	typecheck(unsigned int, lp->lock);
+	typecheck(int, lp->lock);
 	asm volatile(
 		"st	%1,%0\n"
 		: "+Q" (lp->lock)
@@ -141,16 +136,16 @@ extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
 static inline int arch_read_trylock_once(arch_rwlock_t *rw)
 {
-	unsigned int old = ACCESS_ONCE(rw->lock);
-	return likely((int) old >= 0 &&
-		      _raw_compare_and_swap(&rw->lock, old, old + 1));
+	int old = ACCESS_ONCE(rw->lock);
+	return likely(old >= 0 &&
+		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
 }
 
 static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 {
-	unsigned int old = ACCESS_ONCE(rw->lock);
+	int old = ACCESS_ONCE(rw->lock);
 	return likely(old == 0 &&
-		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
+		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
 }
 
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -161,9 +156,9 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 
 #define __RAW_LOCK(ptr, op_val, op_string)	\
 ({						\
-	unsigned int old_val;			\
+	int old_val;				\
 						\
-	typecheck(unsigned int *, ptr);		\
+	typecheck(int *, ptr);			\
 	asm volatile(				\
 		op_string "	%0,%2,%1\n"	\
 		"bcr	14,0\n"			\
@@ -175,9 +170,9 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 
 #define __RAW_UNLOCK(ptr, op_val, op_string)	\
 ({						\
-	unsigned int old_val;			\
+	int old_val;				\
 						\
-	typecheck(unsigned int *, ptr);		\
+	typecheck(int *, ptr);			\
 	asm volatile(				\
 		op_string "	%0,%2,%1\n"	\
 		: "=d" (old_val), "+Q" (*ptr)	\
@@ -187,14 +182,14 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 })
 
 extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);
 
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	int old;
 
 	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
-	if ((int) old < 0)
+	if (old < 0)
 		_raw_read_lock_wait(rw);
 }
 
@@ -205,7 +200,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	int old;
 
 	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
 	if (old != 0)
@@ -232,11 +227,11 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	int old;
 
 	do {
 		old = ACCESS_ONCE(rw->lock);
-	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
+	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
 }
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
@@ -248,7 +243,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	typecheck(unsigned int, rw->lock);
+	typecheck(int, rw->lock);
 
 	rw->owner = 0;
 	asm volatile(
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -6,14 +6,14 @@
 #endif
 
 typedef struct {
-	unsigned int lock;
+	int lock;
 } __attribute__ ((aligned (4))) arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
 
 typedef struct {
-	unsigned int lock;
-	unsigned int owner;
+	int lock;
+	int owner;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED { 0 }
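Why the lock word becomes signed: the rwlock encodes "writer holds the lock" in the sign bit (0x80000000), so with an int lock word the wait loops can test old < 0 directly and the (int) casts disappear. A small stand-alone illustration (assumes the usual two's-complement int, as the kernel does):

#include <stdio.h>

int main(void)
{
	int lock = 0;

	lock |= 0x80000000;			/* writer bit set, as in arch_write_trylock_once() */
	printf("writer held: %d\n", lock < 0);	/* prints 1, no cast needed */
	return 0;
}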
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -32,23 +32,22 @@ static int __init spin_retry_setup(char *str)
 }
 __setup("spin_retry=", spin_retry_setup);
 
-static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
+static inline void compare_and_delay(int *lock, int old)
 {
 	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
 }
 
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
-	unsigned int cpu = SPINLOCK_LOCKVAL;
-	unsigned int owner;
-	int count, first_diag;
+	int cpu = SPINLOCK_LOCKVAL;
+	int owner, count, first_diag;
 
 	first_diag = 1;
 	while (1) {
 		owner = ACCESS_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
 		if (!owner) {
-			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
 				return;
 			continue;
 		}
@@ -62,7 +61,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		count = spin_retry;
 		do {
 			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&lp->lock, owner);
+				compare_and_delay(&lp->lock, owner);
 			owner = ACCESS_ONCE(lp->lock);
 		} while (owner && count-- > 0);
 		if (!owner)
@@ -82,9 +81,8 @@ EXPORT_SYMBOL(arch_spin_lock_wait);
 
 void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
-	unsigned int cpu = SPINLOCK_LOCKVAL;
-	unsigned int owner;
-	int count, first_diag;
+	int cpu = SPINLOCK_LOCKVAL;
+	int owner, count, first_diag;
 
 	local_irq_restore(flags);
 	first_diag = 1;
@@ -93,7 +91,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		/* Try to get the lock if it is free. */
 		if (!owner) {
 			local_irq_disable();
-			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
 				return;
 			local_irq_restore(flags);
 			continue;
@@ -108,7 +106,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		count = spin_retry;
 		do {
 			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&lp->lock, owner);
+				compare_and_delay(&lp->lock, owner);
 			owner = ACCESS_ONCE(lp->lock);
 		} while (owner && count-- > 0);
 		if (!owner)
@@ -128,18 +126,17 @@ EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
-	unsigned int cpu = SPINLOCK_LOCKVAL;
-	unsigned int owner;
-	int count;
+	int cpu = SPINLOCK_LOCKVAL;
+	int owner, count;
 
 	for (count = spin_retry; count > 0; count--) {
 		owner = READ_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
 		if (!owner) {
-			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
 				return 1;
 		} else if (MACHINE_HAS_CAD)
-			_raw_compare_and_delay(&lp->lock, owner);
+			compare_and_delay(&lp->lock, owner);
 	}
 	return 0;
 }
@@ -147,8 +144,8 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
 
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int owner, old;
 	int count = spin_retry;
+	int owner, old;
 
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
@@ -162,12 +159,12 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 		}
 		old = ACCESS_ONCE(rw->lock);
 		owner = ACCESS_ONCE(rw->owner);
-		if ((int) old < 0) {
+		if (old < 0) {
 			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&rw->lock, old);
+				compare_and_delay(&rw->lock, old);
 			continue;
 		}
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
+		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
 			return;
 	}
 }
@@ -175,17 +172,17 @@ EXPORT_SYMBOL(_raw_read_lock_wait);
 
 int _raw_read_trylock_retry(arch_rwlock_t *rw)
 {
-	unsigned int old;
 	int count = spin_retry;
+	int old;
 
 	while (count-- > 0) {
 		old = ACCESS_ONCE(rw->lock);
-		if ((int) old < 0) {
+		if (old < 0) {
 			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&rw->lock, old);
+				compare_and_delay(&rw->lock, old);
 			continue;
 		}
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
+		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
 			return 1;
 	}
 	return 0;
@@ -194,10 +191,10 @@ EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 
-void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
+void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
 {
-	unsigned int owner, old;
 	int count = spin_retry;
+	int owner, old;
 
 	owner = 0;
 	while (1) {
@@ -209,14 +206,14 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
 		old = ACCESS_ONCE(rw->lock);
 		owner = ACCESS_ONCE(rw->owner);
 		smp_mb();
-		if ((int) old >= 0) {
+		if (old >= 0) {
 			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
 			old = prev;
 		}
-		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+		if ((old & 0x7fffffff) == 0 && prev >= 0)
 			break;
 		if (MACHINE_HAS_CAD)
-			_raw_compare_and_delay(&rw->lock, old);
+			compare_and_delay(&rw->lock, old);
 	}
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -225,8 +222,8 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
 
 void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int owner, old, prev;
 	int count = spin_retry;
+	int owner, old, prev;
 
 	prev = 0x80000000;
 	owner = 0;
@@ -238,15 +235,15 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 		}
 		old = ACCESS_ONCE(rw->lock);
 		owner = ACCESS_ONCE(rw->owner);
-		if ((int) old >= 0 &&
-		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
+		if (old >= 0 &&
+		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
 			prev = old;
 		else
 			smp_mb();
-		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+		if ((old & 0x7fffffff) == 0 && prev >= 0)
 			break;
 		if (MACHINE_HAS_CAD)
-			_raw_compare_and_delay(&rw->lock, old);
+			compare_and_delay(&rw->lock, old);
 	}
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -255,24 +252,24 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
 
 int _raw_write_trylock_retry(arch_rwlock_t *rw)
 {
-	unsigned int old;
 	int count = spin_retry;
+	int old;
 
 	while (count-- > 0) {
 		old = ACCESS_ONCE(rw->lock);
 		if (old) {
 			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&rw->lock, old);
+				compare_and_delay(&rw->lock, old);
 			continue;
 		}
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
+		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
 			return 1;
 	}
 	return 0;
 }
 EXPORT_SYMBOL(_raw_write_trylock_retry);
 
-void arch_lock_relax(unsigned int cpu)
+void arch_lock_relax(int cpu)
 {
 	if (!cpu)
 		return;
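Finally, a reduced user-space sketch (not kernel code) of the wait-loop shape in arch_spin_lock_wait() after this patch: spin on plain reads and attempt the boolean cmpxchg only when the lock looks free. MY_LOCKVAL and the volatile read stand in for SPINLOCK_LOCKVAL and ACCESS_ONCE(); the spin_retry and directed-yield logic is omitted:

#define MY_LOCKVAL 1	/* stands in for SPINLOCK_LOCKVAL, a nonzero per-cpu id */

static void spin_lock_sketch(int *lock)
{
	for (;;) {
		int owner = *(volatile int *)lock;	/* plain read, like ACCESS_ONCE() */
		if (!owner && __sync_bool_compare_and_swap(lock, 0, MY_LOCKVAL))
			return;				/* acquired: 0 -> MY_LOCKVAL */
		/* the real code retries spin_retry times, then yields to the lock owner */
	}
}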