locking/arch: Remove dummy arch_{read,spin,write}_lock_flags() implementations
The arch_{read,spin,write}_lock_flags() macros are simply mapped to the
non-flags versions by the majority of architectures, so do this in core
code and remove the dummy implementations. Also remove the implementation
in spinlock_up.h, since all callers of do_raw_spin_lock_flags() call
local_irq_save(flags) anyway.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1507055129-12300-4-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
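The shape of the change, as seen in the hunks below: core code supplies the
fallback mapping under an #ifndef, and an architecture with a genuine
flags-aware implementation (ia64, mn10300, parisc, powerpc, s390) keeps it by
defining the macro to itself. A minimal userspace mock of that pattern — the
struct, HAVE_REAL_FLAGS_VARIANT switch, and main() harness are illustrative
only, not kernel code:

#include <stdio.h>

typedef struct { int slock; } arch_spinlock_t;

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	lock->slock = 1;		/* stand-in for the real locking */
}

#ifdef HAVE_REAL_FLAGS_VARIANT
/* An "arch" with a real flags-aware variant defines the function and then
 * a same-named macro, so the core #ifndef below is skipped. */
static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
					unsigned long flags)
{
	(void)flags;			/* a real arch would use these */
	arch_spin_lock(lock);
}
#define arch_spin_lock_flags arch_spin_lock_flags
#endif

/* Core code: default the flags variant to the plain lock. */
#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#endif

int main(void)
{
	arch_spinlock_t lock = { 0 };

	arch_spin_lock_flags(&lock, 0UL);	/* resolves either way */
	printf("locked=%d\n", lock.slock);
	return 0;
}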
parent 0160fb177d
commit a4c1887d4c
arch/alpha/include/asm/spinlock.h
@@ -13,7 +13,6 @@
  * We make no fairness assumptions. They have a cost.
  */
 
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_spin_is_locked(x) ((x)->lock != 0)
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
@@ -160,7 +159,4 @@ static inline void arch_write_unlock(arch_rwlock_t * lock)
 	lock->lock = 0;
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* _ALPHA_SPINLOCK_H */

arch/arc/include/asm/spinlock.h
@@ -14,7 +14,6 @@
 #include <asm/barrier.h>
 
 #define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
@@ -410,7 +409,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 
 #endif
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* __ASM_SPINLOCK_H */

arch/arm/include/asm/spinlock.h
@@ -52,8 +52,6 @@ static inline void dsb_sev(void)
  * memory.
  */
 
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
@@ -270,7 +268,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	}
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* __ASM_SPINLOCK_H */

arch/arm64/include/asm/spinlock.h
@@ -27,8 +27,6 @@
  * instructions.
  */
 
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned int tmp;
@@ -303,9 +301,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 /* read_can_lock - would read_trylock() succeed? */
 #define arch_read_can_lock(x) ((x)->lock < 0x80000000)
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock() smp_mb()
 

arch/blackfin/include/asm/spinlock.h
@@ -36,8 +36,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__raw_spin_lock_asm(&lock->lock);
 }
 
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __raw_spin_trylock_asm(&lock->lock);
@@ -53,8 +51,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 	__raw_read_lock_asm(&rw->lock);
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	return __raw_read_trylock_asm(&rw->lock);
@@ -70,8 +66,6 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	__raw_write_lock_asm(&rw->lock);
 }
 
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	return __raw_write_trylock_asm(&rw->lock);

arch/hexagon/include/asm/spinlock.h
@@ -167,11 +167,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
 /*
  * SMP spinlocks are intended to allow only a single CPU at the lock
  */
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
 #define arch_spin_is_locked(x) ((x)->lock != 0)
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif

arch/ia64/include/asm/spinlock.h
@@ -126,6 +126,7 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 {
 	arch_spin_lock(lock);
 }
+#define arch_spin_lock_flags arch_spin_lock_flags
 
 #ifdef ASM_SUPPORTED
 
@@ -153,6 +154,7 @@ arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 	: "p6", "p7", "r2", "memory");
 }
 
+#define arch_read_lock_flags arch_read_lock_flags
 #define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
 
 #else /* !ASM_SUPPORTED */
@@ -205,6 +207,7 @@ arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 	: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
+#define arch_write_lock_flags arch_write_lock_flags
 #define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
 
 #define arch_write_trylock(rw) \
@@ -228,8 +231,6 @@ static inline void arch_write_unlock(arch_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define arch_write_lock_flags(l, flags) arch_write_lock(l)
-
 #define arch_write_lock(l) \
 ({ \
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \

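A note on the idiom the ia64 hunks introduce: "#define arch_read_lock_flags
arch_read_lock_flags" defines the macro as its own name. That is not
recursion — an object-like macro is never re-expanded inside its own
expansion — it merely makes the identifier count as defined, so the #ifndef
fallbacks added to include/linux/rwlock.h and include/linux/spinlock.h later
in this patch leave the arch's real implementation alone. A minimal sketch of
the shape, body elided:

/* arch header: genuine flags-aware implementation */
static inline void arch_read_lock_flags(arch_rwlock_t *lock,
					unsigned long flags)
{
	/* ... spin, possibly re-enabling IRQs according to 'flags' ... */
}
#define arch_read_lock_flags arch_read_lock_flags	/* suppress core default */
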
arch/m32r/include/asm/spinlock.h
@@ -28,7 +28,6 @@
  */
 
 #define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
 /**
  * arch_spin_trylock - Try spin lock and return a result
@@ -305,7 +304,4 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
 	return 0;
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* _ASM_M32R_SPINLOCK_H */

arch/metag/include/asm/spinlock.h
@@ -15,9 +15,4 @@
  * locked.
  */
 
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* __ASM_SPINLOCK_H */

arch/metag/include/asm/spinlock_lnkget.h
@@ -209,7 +209,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	return tmp;
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* __ASM_SPINLOCK_LNKGET_H */

arch/mips/include/asm/spinlock.h
@@ -13,7 +13,4 @@
 #include <asm/qrwlock.h>
 #include <asm/qspinlock.h>
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* _ASM_SPINLOCK_H */

arch/mn10300/include/asm/spinlock.h
@@ -84,6 +84,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 	: "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
 	: "memory", "cc");
 }
+#define arch_spin_lock_flags arch_spin_lock_flags
 
 #ifdef __KERNEL__
 
@@ -171,9 +172,6 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
 	return 0;
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #define _raw_spin_relax(lock) cpu_relax()
 #define _raw_read_relax(lock) cpu_relax()
 #define _raw_write_relax(lock) cpu_relax()

arch/parisc/include/asm/spinlock.h
@@ -31,6 +31,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 		cpu_relax();
 	mb();
 }
+#define arch_spin_lock_flags arch_spin_lock_flags
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
@@ -168,7 +169,4 @@ static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
 	return result;
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* __ASM_SPINLOCK_H */

arch/powerpc/include/asm/spinlock.h
@@ -161,6 +161,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 		local_irq_restore(flags_dis);
 	}
 }
+#define arch_spin_lock_flags arch_spin_lock_flags
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
@@ -299,9 +300,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	rw->lock = 0;
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #define arch_spin_relax(lock) __spin_yield(lock)
 #define arch_read_relax(lock) __rw_yield(lock)
 #define arch_write_relax(lock) __rw_yield(lock)

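powerpc is one of the architectures that keeps a real arch_spin_lock_flags():
the local_irq_restore(flags_dis) visible in the hunk above re-enables
interrupts to the caller's saved state while the lock is contended. A
simplified sketch of that idea — example_spin_lock_flags is a made-up name,
and the real powerpc code additionally handles shared-processor yielding:

static inline void example_spin_lock_flags(arch_spinlock_t *lock,
					   unsigned long flags)
{
	while (!arch_spin_trylock(lock)) {
		/* let pending interrupts in, per the caller's old state */
		local_irq_restore(flags);
		while (arch_spin_is_locked(lock))
			cpu_relax();
		/* mask again before the next acquisition attempt */
		local_irq_save(flags);
	}
}
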
arch/s390/include/asm/spinlock.h
@@ -81,6 +81,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
 	if (!arch_spin_trylock_once(lp))
 		arch_spin_lock_wait_flags(lp, flags);
 }
+#define arch_spin_lock_flags arch_spin_lock_flags
 
 static inline int arch_spin_trylock(arch_spinlock_t *lp)
 {
@@ -114,9 +115,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
 extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 static inline int arch_read_trylock_once(arch_rwlock_t *rw)
 {
 	int old = ACCESS_ONCE(rw->lock);

arch/sh/include/asm/spinlock-cas.h
@@ -27,7 +27,6 @@ static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new
  */
 
 #define arch_spin_is_locked(x) ((x)->lock <= 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
@@ -90,7 +89,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* __ASM_SH_SPINLOCK_CAS_H */

arch/sh/include/asm/spinlock-llsc.h
@@ -19,7 +19,6 @@
  */
 
 #define arch_spin_is_locked(x) ((x)->lock <= 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
 /*
  * Simple spin lock operations. There are two variants, one clears IRQ's
@@ -197,7 +196,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	return (oldval > (RW_LOCK_BIAS - 1));
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* __ASM_SH_SPINLOCK_LLSC_H */

arch/sparc/include/asm/spinlock_32.h
@@ -182,10 +182,6 @@ static inline int __arch_read_trylock(arch_rwlock_t *rw)
 	res; \
 })
 
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
-
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* __SPARC_SPINLOCK_H */

arch/sparc/include/asm/spinlock_64.h
@@ -13,9 +13,6 @@
 #include <asm/qrwlock.h>
 #include <asm/qspinlock.h>
 
-#define arch_read_lock_flags(p, f) arch_read_lock(p)
-#define arch_write_lock_flags(p, f) arch_write_lock(p)
-
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__SPARC64_SPINLOCK_H) */

arch/tile/include/asm/spinlock_32.h
@@ -51,9 +51,6 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 
 void arch_spin_lock(arch_spinlock_t *lock);
 
-/* We cannot take an interrupt after getting a ticket, so don't enable them. */
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
 int arch_spin_trylock(arch_spinlock_t *lock);
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
@@ -109,7 +106,4 @@ void arch_read_unlock(arch_rwlock_t *rwlock);
  */
 void arch_write_unlock(arch_rwlock_t *rwlock);
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* _ASM_TILE_SPINLOCK_32_H */

arch/tile/include/asm/spinlock_64.h
@@ -75,9 +75,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 /* Try to get the lock, and return whether we succeeded. */
 int arch_spin_trylock(arch_spinlock_t *lock);
 
-/* We cannot take an interrupt after getting a ticket, so don't enable them. */
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
@@ -138,7 +135,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	return 0;
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* _ASM_TILE_SPINLOCK_64_H */

arch/x86/include/asm/spinlock.h
@@ -41,7 +41,4 @@
 
 #include <asm/qrwlock.h>
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* _ASM_X86_SPINLOCK_H */

arch/xtensa/include/asm/spinlock.h
@@ -33,8 +33,6 @@
 
 #define arch_spin_is_locked(x) ((x)->slock != 0)
 
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
@@ -198,7 +196,4 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 	: "memory");
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 #endif /* _XTENSA_SPINLOCK_H */

include/asm-generic/qspinlock.h
@@ -121,6 +121,5 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 #define arch_spin_lock(l) queued_spin_lock(l)
 #define arch_spin_trylock(l) queued_spin_trylock(l)
 #define arch_spin_unlock(l) queued_spin_unlock(l)
-#define arch_spin_lock_flags(l, f) queued_spin_lock(l)
 
 #endif /* __ASM_GENERIC_QSPINLOCK_H */

include/linux/rwlock.h
@@ -38,6 +38,15 @@ do { \
 extern int do_raw_write_trylock(rwlock_t *lock);
 extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
 #else
+
+#ifndef arch_read_lock_flags
+# define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#endif
+
+#ifndef arch_write_lock_flags
+# define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+#endif
+
 # define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
 # define do_raw_read_lock_flags(lock, flags) \
 		do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)

include/linux/spinlock.h
@@ -165,6 +165,10 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
 	arch_spin_lock(&lock->raw_lock);
 }
 
+#ifndef arch_spin_lock_flags
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#endif
+
 static inline void
 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
 {

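With the fallbacks above in place, the per-architecture dummies deleted
throughout this patch are dead weight. For an architecture that defines
nothing, the call chain resolves as sketched here (an illustrative trace,
not literal kernel output):

/*
 * do_raw_spin_lock_flags(lock, &flags)
 *   -> arch_spin_lock_flags(&lock->raw_lock, *flags)  // core #ifndef default
 *   -> arch_spin_lock(&lock->raw_lock)                // flags simply ignored
 *
 * An arch that defined arch_spin_lock_flags itself (see the ia64, mn10300,
 * parisc, powerpc and s390 hunks) bypasses the default and keeps its
 * flags-aware behaviour.
 */
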
include/linux/spinlock_up.h
@@ -32,14 +32,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	barrier();
 }
 
-static inline void
-arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-	local_irq_save(flags);
-	lock->slock = 0;
-	barrier();
-}
-
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	char oldval = lock->slock;

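Dropping the UP implementation is safe because, as the changelog notes, every
caller of do_raw_spin_lock_flags() has already disabled interrupts with
local_irq_save(flags) before the call; the removed body's own local_irq_save()
was redundant. Roughly the caller shape, simplified from
kernel/locking/spinlock.c and omitting the lockdep and contention hooks:

static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);		/* IRQs are off from here on */
	preempt_disable();
	do_raw_spin_lock_flags(lock, &flags);	/* UP: now reduces to a
						   plain arch_spin_lock() */
	return flags;
}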