x86: Add support for cmpxchg_double

A simple implementation that only supports the word size and does not
have a fallback mode (that would require a spinlock).

Add 32 and 64 bit support for cmpxchg_double. cmpxchg_double uses the
cmpxchg8b or cmpxchg16b instruction on x86 processors to compare and
swap two machine words. This allows lockless algorithms to move more
context information through critical sections.

Set a flag CONFIG_CMPXCHG_DOUBLE to signal that support for double word
cmpxchg detection has been built into the kernel.

Note that each subsystem using cmpxchg_double has to implement a
fallback mechanism as long as we offer support for processors that do
not implement cmpxchg_double.

Reviewed-by: H. Peter Anvin <hpa@zytor.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Christoph Lameter <cl@linux.com>
Link: http://lkml.kernel.org/r/20110601172614.173427964@linux.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
commit 3824abd127
parent 5629937872
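Not part of this patch, but for context on the fallback requirement mentioned above: a subsystem adopting cmpxchg_double() is expected to probe system_has_cmpxchg_double() and keep a lock-based slow path for CPUs without cmpxchg8b/cmpxchg16b. A minimal sketch, with a hypothetical two-word structure and spinlock (only cmpxchg_double() and system_has_cmpxchg_double() come from this commit):

#include <linux/spinlock.h>

/* Hypothetical pair of adjacent machine words that are updated together. */
struct pair {
        unsigned long val;      /* first word  (compared via eax/rax) */
        unsigned long seq;      /* second word (compared via edx/rdx) */
} __attribute__((aligned(2 * sizeof(unsigned long))));

static DEFINE_SPINLOCK(pair_lock);      /* fallback path */

static void pair_update(struct pair *p, unsigned long new_val)
{
        unsigned long old_val, old_seq;

        if (system_has_cmpxchg_double()) {
                /* Retry until both words are swapped in one atomic step. */
                do {
                        old_val = p->val;
                        old_seq = p->seq;
                } while (!cmpxchg_double(&p->val, old_val, old_seq,
                                         new_val, old_seq + 1));
        } else {
                /* Mandatory fallback for processors without the instruction. */
                spin_lock(&pair_lock);
                p->val = new_val;
                p->seq++;
                spin_unlock(&pair_lock);
        }
}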
@@ -312,6 +312,9 @@ config X86_CMPXCHG
 config CMPXCHG_LOCAL
         def_bool X86_64 || (X86_32 && !M386)
 
+config CMPXCHG_DOUBLE
+        def_bool y
+
 config X86_L1_CACHE_SHIFT
         int
         default "7" if MPENTIUM4 || MPSC
@@ -280,4 +280,52 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
 
 #endif
 
+#define cmpxchg8b(ptr, o1, o2, n1, n2) \
+({ \
+        char __ret; \
+        __typeof__(o2) __dummy; \
+        __typeof__(*(ptr)) __old1 = (o1); \
+        __typeof__(o2) __old2 = (o2); \
+        __typeof__(*(ptr)) __new1 = (n1); \
+        __typeof__(o2) __new2 = (n2); \
+        asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1" \
+                     : "=d"(__dummy), "=a" (__ret), "+m" (*ptr) \
+                     : "a" (__old1), "d"(__old2), \
+                       "b" (__new1), "c" (__new2) \
+                     : "memory"); \
+        __ret; })
+
+
+#define cmpxchg8b_local(ptr, o1, o2, n1, n2) \
+({ \
+        char __ret; \
+        __typeof__(o2) __dummy; \
+        __typeof__(*(ptr)) __old1 = (o1); \
+        __typeof__(o2) __old2 = (o2); \
+        __typeof__(*(ptr)) __new1 = (n1); \
+        __typeof__(o2) __new2 = (n2); \
+        asm volatile("cmpxchg8b %2; setz %1" \
+                     : "=d"(__dummy), "=a"(__ret), "+m" (*ptr) \
+                     : "a" (__old1), "d"(__old2), \
+                       "b" (__new1), "c" (__new2) \
+                     : "memory"); \
+        __ret; })
+
+
+#define cmpxchg_double(ptr, o1, o2, n1, n2) \
+({ \
+        BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+        VM_BUG_ON((unsigned long)(ptr) % 8); \
+        cmpxchg8b((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
+({ \
+        BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+        VM_BUG_ON((unsigned long)(ptr) % 8); \
+        cmpxchg8b_local((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+#define system_has_cmpxchg_double() cpu_has_cx8
+
 #endif /* _ASM_X86_CMPXCHG_32_H */
@@ -151,4 +151,49 @@ extern void __cmpxchg_wrong_size(void);
         cmpxchg_local((ptr), (o), (n)); \
 })
 
+#define cmpxchg16b(ptr, o1, o2, n1, n2) \
+({ \
+        char __ret; \
+        __typeof__(o2) __junk; \
+        __typeof__(*(ptr)) __old1 = (o1); \
+        __typeof__(o2) __old2 = (o2); \
+        __typeof__(*(ptr)) __new1 = (n1); \
+        __typeof__(o2) __new2 = (n2); \
+        asm volatile(LOCK_PREFIX "cmpxchg16b %2;setz %1" \
+                     : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
+                     : "b"(__new1), "c"(__new2), \
+                       "a"(__old1), "d"(__old2)); \
+        __ret; })
+
+
+#define cmpxchg16b_local(ptr, o1, o2, n1, n2) \
+({ \
+        char __ret; \
+        __typeof__(o2) __junk; \
+        __typeof__(*(ptr)) __old1 = (o1); \
+        __typeof__(o2) __old2 = (o2); \
+        __typeof__(*(ptr)) __new1 = (n1); \
+        __typeof__(o2) __new2 = (n2); \
+        asm volatile("cmpxchg16b %2;setz %1" \
+                     : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
+                     : "b"(__new1), "c"(__new2), \
+                       "a"(__old1), "d"(__old2)); \
+        __ret; })
+
+#define cmpxchg_double(ptr, o1, o2, n1, n2) \
+({ \
+        BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+        VM_BUG_ON((unsigned long)(ptr) % 16); \
+        cmpxchg16b((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
+({ \
+        BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+        VM_BUG_ON((unsigned long)(ptr) % 16); \
+        cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+#define system_has_cmpxchg_double() cpu_has_cx16
+
 #endif /* _ASM_X86_CMPXCHG_64_H */
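As an aside, the operand roles in the cmpxchg16b/cmpxchg8b sequences above can be modeled in plain C roughly as below. The helper name is made up for illustration, and the model ignores atomicity: the real instruction performs the comparison and the store of both words as a single locked operation, and on failure it also loads the current memory contents back into the old-value registers (which the macros above discard).

/* Illustrative, non-atomic model of what LOCK cmpxchg16b does. */
static int cmpxchg_double_model(unsigned long *ptr,
                                unsigned long old1, unsigned long old2,
                                unsigned long new1, unsigned long new2)
{
        if (ptr[0] == old1 && ptr[1] == old2) { /* compare both words */
                ptr[0] = new1;                  /* store the new pair */
                ptr[1] = new2;
                return 1;                       /* ZF set, setz reports success */
        }
        return 0;                               /* mismatch, ZF clear */
}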
@@ -288,6 +288,8 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
 #define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
+#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
+#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg 1