[S390] implement interrupt-enabling rwlocks
Arch backend for f5f7eac41d ("Allow rwlocks to re-enable interrupts").
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit ce58ae6f7f
parent 76d4e00a05
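With these arch hooks in place, read_lock_irqsave() and write_lock_irqsave() on s390 no longer have to spin on a contended rwlock with interrupts hard-disabled: the generic layer can hand the saved flags down so the wait loop is able to re-enable interrupts while it spins. A minimal caller-side sketch (illustrative only; example_lock and example_reader are made-up names, the locking calls are the standard kernel API):

	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(example_lock);	/* hypothetical lock, for illustration */

	static void example_reader(void)
	{
		unsigned long flags;

		/* Saves and disables interrupts; on contention the arch backend
		 * may briefly re-enable them while waiting for the lock. */
		read_lock_irqsave(&example_lock, flags);
		/* ... read data protected by example_lock ... */
		read_unlock_irqrestore(&example_lock, flags);
	}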
@@ -122,8 +122,10 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp)
 #define __raw_write_can_lock(x) ((x)->lock == 0)
 
 extern void _raw_read_lock_wait(raw_rwlock_t *lp);
+extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
 extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
 extern void _raw_write_lock_wait(raw_rwlock_t *lp);
+extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
 extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
 
 static inline void __raw_read_lock(raw_rwlock_t *rw)
@@ -134,6 +136,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		_raw_read_lock_wait(rw);
 }
 
+static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+{
+	unsigned int old;
+	old = rw->lock & 0x7fffffffU;
+	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+		_raw_read_lock_wait_flags(rw, flags);
+}
+
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned int old, cmp;
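Both the inline fast paths above and the wait loops further down rely on _raw_compare_and_swap(), which on s390 boils down to a single compare-and-swap (cs) instruction and returns the value previously held in the lock word, so comparing the result with the expected old value tells the caller whether the swap happened. A portable stand-in (a sketch only, not the kernel helper) using a GCC builtin:

	/* Sketch: stand-in for _raw_compare_and_swap().  Atomically performs
	 * "if (*lock == old) *lock = new" and returns the value that was found
	 * in *lock, so "result == old" means the update took effect. */
	static inline unsigned int
	cas_lock_word(volatile unsigned int *lock, unsigned int old, unsigned int new)
	{
		return __sync_val_compare_and_swap(lock, old, new);
	}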
@@ -151,6 +161,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		_raw_write_lock_wait(rw);
 }
 
+static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+{
+	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+		_raw_write_lock_wait_flags(rw, flags);
+}
+
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
@@ -172,9 +188,6 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return _raw_write_trylock_retry(rw);
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
-
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
 
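The remaining hunks add the out-of-line wait routines that the new inline _flags fast paths fall back to. They operate on the same 32-bit lock word as the code above: bit 31 (0x80000000) marks a writer and the low 31 bits count readers, which is why the read paths mask with 0x7fffffffU before bumping the count, so the compare-and-swap fails if a writer got in first. In the _flags variants, local_irq_restore(flags) re-enables interrupts while the CPU waits and local_irq_disable() switches them back off before the compare-and-swap, so the lock is never acquired with interrupts on. A standalone, non-kernel sketch of the word encoding using C11 atomics (names are illustrative, not from the patch):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	static _Atomic uint32_t lock_word;	/* 0 = free, bit 31 = writer, low 31 bits = reader count */

	static bool try_read_lock(void)
	{
		/* Expect "no writer held": mask off bit 31, then try to bump the
		 * reader count.  Fails if a writer took the lock in the meantime. */
		uint32_t old = atomic_load(&lock_word) & 0x7fffffffU;
		return atomic_compare_exchange_strong(&lock_word, &old, old + 1);
	}

	static bool try_write_lock(void)
	{
		/* A writer needs the word to be completely free: no readers, no writer. */
		uint32_t expected = 0;
		return atomic_compare_exchange_strong(&lock_word, &expected, UINT32_C(0x80000000));
	}

	static void read_unlock(void)  { atomic_fetch_sub(&lock_word, 1); }
	static void write_unlock(void) { atomic_store(&lock_word, 0); }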
@@ -124,6 +124,27 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_lock_wait);
 
+void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+{
+	unsigned int old;
+	int count = spin_retry;
+
+	local_irq_restore(flags);
+	while (1) {
+		if (count-- <= 0) {
+			_raw_yield();
+			count = spin_retry;
+		}
+		if (!__raw_read_can_lock(rw))
+			continue;
+		old = rw->lock & 0x7fffffffU;
+		local_irq_disable();
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+			return;
+	}
+}
+EXPORT_SYMBOL(_raw_read_lock_wait_flags);
+
 int _raw_read_trylock_retry(raw_rwlock_t *rw)
 {
 	unsigned int old;
@@ -157,6 +178,25 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
+void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+{
+	int count = spin_retry;
+
+	local_irq_restore(flags);
+	while (1) {
+		if (count-- <= 0) {
+			_raw_yield();
+			count = spin_retry;
+		}
+		if (!__raw_write_can_lock(rw))
+			continue;
+		local_irq_disable();
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+			return;
+	}
+}
+EXPORT_SYMBOL(_raw_write_lock_wait_flags);
+
 int _raw_write_trylock_retry(raw_rwlock_t *rw)
 {
 	int count = spin_retry;