ARC: LLOCK/SCOND based rwlock
With LLOCK/SCOND, the rwlock counter can be atomically updated without the need for a guarding spin lock.

This in turn elides the EXchange-instruction-based spinning, which causes the cacheline to transition to the exclusive state; concurrent spinning across cores would cause the line to keep bouncing around. The LLOCK/SCOND based implementation is superior as spinning on LLOCK keeps the cacheline in the shared state.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
This commit is contained in:
parent
ae7eae9e03
commit
69cbe630f5
|
@ -75,6 +75,164 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
|
||||||
smp_mb();
|
smp_mb();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Read-write spinlocks, allowing multiple readers but only one writer.
|
||||||
|
* Unfair locking as Writers could be starved indefinitely by Reader(s)
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
 * Reader lock: spin until no writer holds the lock, then atomically
 * decrement the counter via LLOCK/SCOND. The counter starts at
 * __ARCH_RW_LOCK_UNLOCKED__ and each active reader lowers it by one;
 * 0 means a writer owns the lock. smp_mb() before and after provides
 * the acquire-side memory ordering expected of a lock.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 *
	 * 	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* SCOND failed (lost reservation): retry */
	"					\n"

	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}
|
||||||
|
|
||||||
|
/* 1 - lock taken successfully */
/*
 * Non-blocking reader lock attempt: if a writer holds the lock
 * (counter <= 0) bail out immediately with 0; otherwise atomically
 * decrement the counter and return 1. Note the "bnz 1b" only retries
 * an SCOND that failed due to a lost reservation — a write-locked
 * counter branches straight to label 4 and never loops.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1	\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)	/* read-write: keeps the 0 init on the bail path */
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}
|
||||||
|
|
||||||
|
/*
 * Writer lock: spin until the counter reads exactly
 * __ARCH_RW_LOCK_UNLOCKED__ (no readers, no writer), then atomically
 * set it to 0 to claim exclusive ownership. Spinning happens on the
 * LLOCK load, so the cacheline stays shared while contended.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 *
	 * 	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* SCOND failed (lost reservation): retry */
	"					\n"

	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}
|
||||||
|
|
||||||
|
/* 1 - lock taken successfully */
/*
 * Non-blocking writer lock attempt: if the lock is not completely
 * free (any reader or a writer present) bail out with 0; otherwise
 * atomically set the counter to 0 (write-locked) and return 1.
 * "bnz 1b" only retries a reservation-lost SCOND, never a busy lock.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1	\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)	/* read-write: keeps the 0 init on the bail path */
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}
|
||||||
|
|
||||||
|
/*
 * Reader unlock: atomically increment the counter (undoing the
 * decrement done by arch_read_lock/arch_read_trylock) via an
 * LLOCK/SCOND retry loop. smp_mb() on both sides gives the
 * release-side ordering.
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* SCOND failed (lost reservation): retry */
	"					\n"

	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}
|
||||||
|
|
||||||
|
/*
 * Writer unlock: the writer owns the lock exclusively, so no
 * atomic RMW is needed — a plain store of the UNLOCKED value
 * suffices, bracketed by smp_mb() for release ordering.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}
|
||||||
|
|
||||||
#else /* !CONFIG_ARC_HAS_LLSC */
|
#else /* !CONFIG_ARC_HAS_LLSC */
|
||||||
|
|
||||||
static inline void arch_spin_lock(arch_spinlock_t *lock)
|
static inline void arch_spin_lock(arch_spinlock_t *lock)
|
||||||
|
@ -148,23 +306,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
|
||||||
smp_mb();
|
smp_mb();
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Read-write spinlocks, allowing multiple readers but only one writer.
|
* Read-write spinlocks, allowing multiple readers but only one writer.
|
||||||
|
* Unfair locking as Writers could be starved indefinitely by Reader(s)
|
||||||
*
|
*
|
||||||
* The spinlock itself is contained in @counter and access to it is
|
* The spinlock itself is contained in @counter and access to it is
|
||||||
* serialized with @lock_mutex.
|
* serialized with @lock_mutex.
|
||||||
*
|
|
||||||
* Unfair locking as Writers could be starved indefinitely by Reader(s)
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/* Would read_trylock() succeed? */
|
|
||||||
#define arch_read_can_lock(x) ((x)->counter > 0)
|
|
||||||
|
|
||||||
/* Would write_trylock() succeed? */
|
|
||||||
#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
|
|
||||||
|
|
||||||
/* 1 - lock taken successfully */
|
/* 1 - lock taken successfully */
|
||||||
static inline int arch_read_trylock(arch_rwlock_t *rw)
|
static inline int arch_read_trylock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
|
@ -235,6 +384,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
|
||||||
arch_spin_unlock(&(rw->lock_mutex));
|
arch_spin_unlock(&(rw->lock_mutex));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define arch_read_can_lock(x) ((x)->counter > 0)
|
||||||
|
#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
|
||||||
|
|
||||||
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
|
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
|
||||||
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
|
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
|
||||||
|
|
||||||
|
|
|
@ -26,7 +26,9 @@ typedef struct {
|
||||||
*/
|
*/
|
||||||
typedef struct {
|
typedef struct {
|
||||||
volatile unsigned int counter;
|
volatile unsigned int counter;
|
||||||
|
#ifndef CONFIG_ARC_HAS_LLSC
|
||||||
arch_spinlock_t lock_mutex;
|
arch_spinlock_t lock_mutex;
|
||||||
|
#endif
|
||||||
} arch_rwlock_t;
|
} arch_rwlock_t;
|
||||||
|
|
||||||
#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
|
#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
|
||||||
|
|
Loading…
Reference in New Issue