locking/atomic: csky: move to ARCH_ATOMIC
We'd like all architectures to convert to ARCH_ATOMIC, as once all architectures are converted it will be possible to make significant cleanups to the atomics headers, and this will make it much easier to generically enable atomic functionality (e.g. debug logic in the instrumented wrappers). As a step towards that, this patch migrates csky to ARCH_ATOMIC. The arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common code wraps these with optional instrumentation to provide the regular functions. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Acked-by: Guo Ren <guoren@kernel.org> Cc: Boqun Feng <boqun.feng@gmail.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Will Deacon <will@kernel.org> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lore.kernel.org/r/20210525140232.53872-17-mark.rutland@arm.com
This commit is contained in:
parent
fc63a6e08a
commit
a5fb82d7e2
|
@@ -2,6 +2,7 @@
|
||||||
config CSKY
|
config CSKY
|
||||||
def_bool y
|
def_bool y
|
||||||
select ARCH_32BIT_OFF_T
|
select ARCH_32BIT_OFF_T
|
||||||
|
select ARCH_ATOMIC
|
||||||
select ARCH_HAS_DMA_PREP_COHERENT
|
select ARCH_HAS_DMA_PREP_COHERENT
|
||||||
select ARCH_HAS_GCOV_PROFILE_ALL
|
select ARCH_HAS_GCOV_PROFILE_ALL
|
||||||
select ARCH_HAS_SYNC_DMA_FOR_CPU
|
select ARCH_HAS_SYNC_DMA_FOR_CPU
|
||||||
|
|
|
@@ -31,7 +31,7 @@ extern void __bad_xchg(void);
|
||||||
__ret; \
|
__ret; \
|
||||||
})
|
})
|
||||||
|
|
||||||
#define xchg_relaxed(ptr, x) \
|
#define arch_xchg_relaxed(ptr, x) \
|
||||||
(__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
|
(__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
|
||||||
|
|
||||||
#define __cmpxchg_relaxed(ptr, old, new, size) \
|
#define __cmpxchg_relaxed(ptr, old, new, size) \
|
||||||
|
@@ -61,14 +61,14 @@ extern void __bad_xchg(void);
|
||||||
__ret; \
|
__ret; \
|
||||||
})
|
})
|
||||||
|
|
||||||
#define cmpxchg_relaxed(ptr, o, n) \
|
#define arch_cmpxchg_relaxed(ptr, o, n) \
|
||||||
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
|
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
|
||||||
|
|
||||||
#define cmpxchg(ptr, o, n) \
|
#define arch_cmpxchg(ptr, o, n) \
|
||||||
({ \
|
({ \
|
||||||
__typeof__(*(ptr)) __ret; \
|
__typeof__(*(ptr)) __ret; \
|
||||||
__smp_release_fence(); \
|
__smp_release_fence(); \
|
||||||
__ret = cmpxchg_relaxed(ptr, o, n); \
|
__ret = arch_cmpxchg_relaxed(ptr, o, n); \
|
||||||
__smp_acquire_fence(); \
|
__smp_acquire_fence(); \
|
||||||
__ret; \
|
__ret; \
|
||||||
})
|
})
|
||||||
|
|
Loading…
Reference in New Issue