s390/atomic: get rid of gcc atomic builtins

s390 is the only architecture in the kernel which makes use of gcc's
atomic builtin functions. Even though I don't see any technical
problem with that right now, remove this code and open-code
compare-and-swap loops again, like every other architecture does.
We can switch to a generic implementation once other architectures
do the same.

See also https://lwn.net/Articles/586838/ for further details.

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Heiko Carstens 2021-03-22 16:36:27 +01:00
parent ca897bb181
commit b23eb636d7
1 changed file with 30 additions and 6 deletions
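
For readers unfamiliar with the pattern, here is a minimal sketch (not part
of this commit) of the compare-and-swap loop the commit message refers to: a
read-modify-write that is retried until no other CPU has modified the value
in between. It is written against gcc's __sync_val_compare_and_swap builtin,
which the hunk below replaces with open-coded CS/CSG instructions; the helper
name cas_or is made up for this illustration.

/* Illustration only: atomic OR built as a compare-and-swap loop. */
static inline int cas_or(int *ptr, int mask)
{
	int old, new;

	do {
		old = *ptr;		/* value we expect to find */
		new = old | mask;	/* value we want to store */
		/* retry if another CPU changed *ptr in the meantime */
	} while (__sync_val_compare_and_swap(ptr, old, new) != old);

	return old;
}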

@@ -156,22 +156,46 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
 static inline int __atomic_cmpxchg(int *ptr, int old, int new)
 {
-	return __sync_val_compare_and_swap(ptr, old, new);
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new)
+		: "cc", "memory");
+	return old;
 }
 
-static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new)
+static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
 {
-	return __sync_bool_compare_and_swap(ptr, old, new);
+	int old_expected = old;
+
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new)
+		: "cc", "memory");
+	return old == old_expected;
 }
 
 static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
 {
-	return __sync_val_compare_and_swap(ptr, old, new);
+	asm volatile(
+		"	csg	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+S" (*ptr)
+		: [new] "d" (new)
+		: "cc", "memory");
+	return old;
 }
 
-static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
 {
-	return __sync_bool_compare_and_swap(ptr, old, new);
+	long old_expected = old;
+
+	asm volatile(
+		"	csg	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+S" (*ptr)
+		: [new] "d" (new)
+		: "cc", "memory");
+	return old == old_expected;
 }
 
 #endif /* __ARCH_S390_ATOMIC_OPS__ */
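
For illustration, a hypothetical call site (not part of this commit) showing
how the new bool-returning variant is typically used, assuming kernel context
where asm/atomic_ops.h and READ_ONCE() are available; the function name
set_flag is made up for this sketch.

/* Illustration only: set a flag bit, retrying if the word changed under us. */
static inline void set_flag(int *word, int flag)
{
	int old;

	do {
		old = READ_ONCE(*word);
		/* succeeds only if *word still holds the value we read */
	} while (!__atomic_cmpxchg_bool(word, old, old | flag));
}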