x86/cmpxchg: add a locked add() helper
Mostly to remove some conditional code in spinlock.h.

Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
commit 3d94ae0c70 (parent 4a7f340c6a)
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
 	__compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
 	__compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+	__compiletime_error("Bad argument size for add");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
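Note: __add_wrong_size() follows the same trick as the existing __cmpxchg_wrong_size() and __xadd_wrong_size() declarations: the function is declared but never defined, and __compiletime_error() expands to GCC's error attribute, so the build fails only if a call to it survives constant folding of the sizeof switch. A minimal stand-alone sketch of the mechanism (bad_size() and checked_add() are hypothetical names, not part of this commit; assumes GCC with optimization enabled, as kernel builds are):

/* Sketch of the __compiletime_error() mechanism. bad_size() is never
 * defined; GCC's error attribute raises a build error if a call to it
 * remains after the constant sizeof condition has been folded away.
 */
extern void bad_size(void) __attribute__((error("Bad argument size")));

#define checked_add(ptr, inc)					\
	({							\
		if (sizeof(*(ptr)) > 8)				\
			bad_size();	/* only emitted if reachable */	\
		*(ptr) += (inc);				\
	})

int main(void)
{
	int x = 0;
	checked_add(&x, 1);	/* ok: branch folds away, no call emitted */
	return x;
}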
@@ -207,4 +209,44 @@ extern void __xadd_wrong_size(void)
 #define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
 
+#define __add(ptr, inc, lock)						\
+	({								\
+		__typeof__ (*(ptr)) __ret = (inc);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock "addb %b1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock "addw %w1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock "addl %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock "addq %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
+			break;						\
+		default:						\
+			__add_wrong_size();				\
+		}							\
+		__ret;							\
+	})
+
+/*
+ * add_*() adds "inc" to "*ptr"
+ *
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
+ */
+#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
+
 #endif	/* ASM_X86_CMPXCHG_H */
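Note: a minimal sketch of how the new helpers are meant to be used by callers; the counter and function below are hypothetical, not part of this commit. The sizeof switch in __add() picks the instruction width at compile time, and LOCK_PREFIX degrades to nothing on !CONFIG_SMP builds:

#include <linux/types.h>
#include <asm/cmpxchg.h>

static u16 nr_events;			/* hypothetical counter */

static void count_event(void)
{
	add_smp(&nr_events, 1);		/* "lock addw" on SMP, plain addw on UP */
	add_sync(&nr_events, 1);	/* "lock addw" always */
}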
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -79,23 +79,10 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
-	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
+	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
 }
-#else
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
-{
-	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
-}
-#endif
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
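Note: a single __add() call can replace both #if branches because the width of lock->tickets.head already encodes NR_CPUS: the ticket fields are one byte wide when NR_CPUS < 256 and two bytes otherwise, so the sizeof switch inside __add() selects addb or addw at compile time, matching the old incb/incw pair. A user-space analogue of that dispatch (demo_add() is a hypothetical illustration, not kernel code; x86 GCC/Clang assumed):

/* User-space sketch of the compile-time size dispatch used by __add().
 * The switch on sizeof() is folded away, leaving one locked add of the
 * width that matches the pointed-to type.
 */
#include <stdint.h>
#include <stdio.h>

#define demo_add(ptr, inc)						\
	({								\
		switch (sizeof(*(ptr))) {				\
		case 1:							\
			asm volatile ("lock addb %b1, %0"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case 2:							\
			asm volatile ("lock addw %w1, %0"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		}							\
	})

int main(void)
{
	uint8_t head8 = 0;	/* like tickets.head when NR_CPUS < 256 */
	uint16_t head16 = 0;	/* like tickets.head when NR_CPUS >= 256 */

	demo_add(&head8, 1);	/* compiles to lock addb */
	demo_add(&head16, 1);	/* compiles to lock addw */
	printf("%u %u\n", head8, head16);
	return 0;
}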