locking/atomics: Rework ordering barriers
Currently architectures can override __atomic_op_*() to define the barriers
used before/after a relaxed atomic when used to build acquire/release/fence
variants. This has the unfortunate property of requiring the architecture
to define the full wrapper for the atomics, rather than just the barriers
they care about, and gets in the way of generating atomics which can be
easily read.

Instead, this patch has architectures define an optional set of barriers:

* __atomic_acquire_fence()
* __atomic_release_fence()
* __atomic_pre_full_fence()
* __atomic_post_full_fence()

... which <linux/atomic.h> uses to build the wrappers.

It would be nice if we could undef these, along with the __atomic_op_*()
wrappers, but that would break the cmpxchg() wrappers, which are written
in preprocessor. Undefs would have been nice, but alas.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: andy.shevchenko@gmail.com
Cc: arnd@arndb.de
Cc: aryabinin@virtuozzo.com
Cc: catalin.marinas@arm.com
Cc: dvyukov@google.com
Cc: glider@google.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: peter@hurleysoftware.com
Link: http://lkml.kernel.org/r/20180716113017.3909-7-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit fd2efaa4eb
parent 4d2b25f630
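As a minimal sketch of the new division of labour (illustrative, not
verbatim kernel code; my_arch_acquire_barrier() is a hypothetical
placeholder), an architecture now supplies only the fence it cares
about, and <linux/atomic.h> builds the full acquire wrapper around the
relaxed operation:

  /* arch side: override only the barrier that matters */
  #define __atomic_acquire_fence()	my_arch_acquire_barrier()

  /* generic side (shape as in the final hunk below) */
  #define __atomic_op_acquire(op, args...)				\
  ({									\
  	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
  	__atomic_acquire_fence();					\
  	__ret;								\
  })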
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -18,11 +18,11 @@
  * To ensure dependency ordering is preserved for the _relaxed and
  * _release atomics, an smp_read_barrier_depends() is unconditionally
  * inserted into the _relaxed variants, which are used to build the
- * barriered versions. To avoid redundant back-to-back fences, we can
- * define the _acquire and _fence versions explicitly.
+ * barriered versions. Avoid redundant back-to-back fences in the
+ * _acquire and _fence versions.
  */
-#define __atomic_op_acquire(op, args...)	op##_relaxed(args)
-#define __atomic_op_fence			__atomic_op_release
+#define __atomic_acquire_fence()
+#define __atomic_post_full_fence()
 
 #define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }
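The two empty definitions are deliberate: as the comment above notes,
alpha's _relaxed variants already contain an smp_read_barrier_depends(),
which alpha implements as a full "mb", so adding the generic
acquire/post-full fence would create a redundant back-to-back barrier.
As a hedged sketch, atomic_add_return_acquire(i, v) therefore reduces
to:

  ({
  	/* the relaxed op already ends with a full barrier on alpha */
  	int __ret = atomic_add_return_relaxed(i, v);
  	__atomic_acquire_fence();	/* expands to nothing */
  	__ret;
  })

The release and pre-full fences are not overridden, so they keep the
generic smp_mb__before_atomic() fallback.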
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -18,18 +18,11 @@
  * a "bne-" instruction at the end, so an isync is enough as a acquire barrier
  * on the platform without lwsync.
  */
-#define __atomic_op_acquire(op, args...)				\
-({									\
-	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
-	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
-	__ret;								\
-})
+#define __atomic_acquire_fence()					\
+	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
 
-#define __atomic_op_release(op, args...)				\
-({									\
-	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
-	op##_relaxed(args);						\
-})
+#define __atomic_release_fence()					\
+	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
 
 static __inline__ int atomic_read(const atomic_t *v)
 {
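With the fences above, the generic wrappers from <linux/atomic.h>
generate the same code the deleted open-coded wrappers did. As a rough
sketch (not compiler output), atomic_add_return_acquire(i, v) on
powerpc now expands to:

  ({
  	int __ret = atomic_add_return_relaxed(i, v);
  	/* PPC_ACQUIRE_BARRIER: lwsync, or isync on platforms
  	 * without lwsync (see the comment in the hunk above) */
  	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");
  	__ret;
  })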
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -25,18 +25,11 @@
 
 #define ATOMIC_INIT(i)	{ (i) }
 
-#define __atomic_op_acquire(op, args...)				\
-({									\
-	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
-	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory");	\
-	__ret;								\
-})
+#define __atomic_acquire_fence()					\
+	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
 
-#define __atomic_op_release(op, args...)				\
-({									\
-	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");	\
-	op##_relaxed(args);						\
-})
+#define __atomic_release_fence()					\
+	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
 
 static __always_inline int atomic_read(const atomic_t *v)
 {
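For reference, and assuming the arch/riscv/include/asm/fence.h
definitions current at the time of this patch, the barrier strings used
above expand to RISC-V fence instructions on SMP builds (and to nothing
on !SMP):

  #define RISCV_ACQUIRE_BARRIER	"\tfence r , rw\n"	/* reads -> reads/writes */
  #define RISCV_RELEASE_BARRIER	"\tfence rw,  w\n"	/* reads/writes -> writes */

Note the trailing ';' on the new __atomic_release_fence(): once the
generic wrapper appends its own ';', it only produces an empty statement
inside the ({ ... }) block, so it is harmless, if inconsistent with the
powerpc version.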
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -38,40 +38,46 @@
  * barriers on top of the relaxed variant. In the case where the relaxed
  * variant is already fully ordered, no additional barriers are needed.
  *
- * Besides, if an arch has a special barrier for acquire/release, it could
- * implement its own __atomic_op_* and use the same framework for building
- * variants
- *
- * If an architecture overrides __atomic_op_acquire() it will probably want
- * to define smp_mb__after_spinlock().
+ * If an architecture overrides __atomic_acquire_fence() it will probably
+ * want to define smp_mb__after_spinlock().
  */
-#ifndef __atomic_op_acquire
+#ifndef __atomic_acquire_fence
+#define __atomic_acquire_fence		smp_mb__after_atomic
+#endif
+
+#ifndef __atomic_release_fence
+#define __atomic_release_fence		smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_pre_full_fence
+#define __atomic_pre_full_fence	smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_post_full_fence
+#define __atomic_post_full_fence	smp_mb__after_atomic
+#endif
+
 #define __atomic_op_acquire(op, args...)				\
 ({									\
 	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
-	smp_mb__after_atomic();						\
+	__atomic_acquire_fence();					\
 	__ret;								\
 })
-#endif
 
-#ifndef __atomic_op_release
 #define __atomic_op_release(op, args...)				\
 ({									\
-	smp_mb__before_atomic();					\
+	__atomic_release_fence();					\
 	op##_relaxed(args);						\
 })
-#endif
 
-#ifndef __atomic_op_fence
 #define __atomic_op_fence(op, args...)					\
 ({									\
 	typeof(op##_relaxed(args)) __ret;				\
-	smp_mb__before_atomic();					\
+	__atomic_pre_full_fence();					\
 	__ret = op##_relaxed(args);					\
-	smp_mb__after_atomic();						\
+	__atomic_post_full_fence();					\
 	__ret;								\
 })
-#endif
 
 /* atomic_add_return_relaxed */
 #ifndef atomic_add_return_relaxed
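For context, these wrappers are consumed by the pre-existing generation
pattern in <linux/atomic.h> (a sketch, unchanged by this patch): each
ordered variant is built from the relaxed one only when the architecture
has not provided it directly.

  #ifndef atomic_add_return_acquire
  #define atomic_add_return_acquire(...)				\
  	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
  #endif

  #ifndef atomic_add_return_release
  #define atomic_add_return_release(...)				\
  	__atomic_op_release(atomic_add_return, __VA_ARGS__)
  #endif

  #ifndef atomic_add_return
  #define atomic_add_return(...)					\
  	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
  #endif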