locking/atomic: scripts: build raw_atomic_long*() directly

Now that arch_atomic*() usage is limited to the atomic headers, we no
longer have any users of arch_atomic_long_*(), and can generate
raw_atomic_long_*() directly.

Generate the raw_atomic_long_*() ops directly.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-20-mark.rutland@arm.com
Mark Rutland 2023-06-05 08:01:16 +01:00 committed by Peter Zijlstra
parent 0f613bfa82
commit 1815da1718
5 changed files with 345 additions and 859 deletions
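To illustrate the effect of the change, here is a rough sketch (not the verbatim generated code; the CONFIG_64BIT selection and any pointer casts applied by the generator are elided) of how one representative op moves: before this patch, raw_atomic_long_read() was emitted into atomic-raw.h as a wrapper around arch_atomic_long_read(); after it, gen-atomic-long.sh emits raw_atomic_long_read() into atomic-long.h directly on top of the raw_atomic*() ops.

/* Sketch only -- before: the raw_ op forwarded to the arch_ wrapper. */
static __always_inline long
raw_atomic_long_read(const atomic_long_t *v)
{
	return arch_atomic_long_read(v);
}

/* Sketch only -- after: the raw_ op is built directly from the raw
 * atomic/atomic64 ops, e.g. on a 64-bit kernel: */
static __always_inline long
raw_atomic_long_read(const atomic_long_t *v)
{
	return raw_atomic64_read(v);
}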

include/linux/atomic.h

@@ -78,8 +78,8 @@
})

#include <linux/atomic/atomic-arch-fallback.h>
-#include <linux/atomic/atomic-long.h>
#include <linux/atomic/atomic-raw.h>
+#include <linux/atomic/atomic-long.h>
#include <linux/atomic/atomic-instrumented.h>

#endif /* _LINUX_ATOMIC_H */
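(The include reorder follows from the new dependency: atomic-long.h now defines raw_atomic_long_*() in terms of raw_atomic*()/raw_atomic64_*(), which atomic-raw.h provides as wrappers around the arch_ ops, so atomic-long.h has to be included after atomic-raw.h rather than before it.)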

include/linux/atomic/atomic-long.h (diff suppressed because it is too large)

include/linux/atomic/atomic-raw.h

@@ -1026,516 +1026,6 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
return arch_atomic64_dec_if_positive(v);
}
static __always_inline long
raw_atomic_long_read(const atomic_long_t *v)
{
return arch_atomic_long_read(v);
}
static __always_inline long
raw_atomic_long_read_acquire(const atomic_long_t *v)
{
return arch_atomic_long_read_acquire(v);
}
static __always_inline void
raw_atomic_long_set(atomic_long_t *v, long i)
{
arch_atomic_long_set(v, i);
}
static __always_inline void
raw_atomic_long_set_release(atomic_long_t *v, long i)
{
arch_atomic_long_set_release(v, i);
}
static __always_inline void
raw_atomic_long_add(long i, atomic_long_t *v)
{
arch_atomic_long_add(i, v);
}
static __always_inline long
raw_atomic_long_add_return(long i, atomic_long_t *v)
{
return arch_atomic_long_add_return(i, v);
}
static __always_inline long
raw_atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
return arch_atomic_long_add_return_acquire(i, v);
}
static __always_inline long
raw_atomic_long_add_return_release(long i, atomic_long_t *v)
{
return arch_atomic_long_add_return_release(i, v);
}
static __always_inline long
raw_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
return arch_atomic_long_add_return_relaxed(i, v);
}
static __always_inline long
raw_atomic_long_fetch_add(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_add(i, v);
}
static __always_inline long
raw_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_add_acquire(i, v);
}
static __always_inline long
raw_atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_add_release(i, v);
}
static __always_inline long
raw_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_add_relaxed(i, v);
}
static __always_inline void
raw_atomic_long_sub(long i, atomic_long_t *v)
{
arch_atomic_long_sub(i, v);
}
static __always_inline long
raw_atomic_long_sub_return(long i, atomic_long_t *v)
{
return arch_atomic_long_sub_return(i, v);
}
static __always_inline long
raw_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
return arch_atomic_long_sub_return_acquire(i, v);
}
static __always_inline long
raw_atomic_long_sub_return_release(long i, atomic_long_t *v)
{
return arch_atomic_long_sub_return_release(i, v);
}
static __always_inline long
raw_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
return arch_atomic_long_sub_return_relaxed(i, v);
}
static __always_inline long
raw_atomic_long_fetch_sub(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_sub(i, v);
}
static __always_inline long
raw_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_sub_acquire(i, v);
}
static __always_inline long
raw_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_sub_release(i, v);
}
static __always_inline long
raw_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_sub_relaxed(i, v);
}
static __always_inline void
raw_atomic_long_inc(atomic_long_t *v)
{
arch_atomic_long_inc(v);
}
static __always_inline long
raw_atomic_long_inc_return(atomic_long_t *v)
{
return arch_atomic_long_inc_return(v);
}
static __always_inline long
raw_atomic_long_inc_return_acquire(atomic_long_t *v)
{
return arch_atomic_long_inc_return_acquire(v);
}
static __always_inline long
raw_atomic_long_inc_return_release(atomic_long_t *v)
{
return arch_atomic_long_inc_return_release(v);
}
static __always_inline long
raw_atomic_long_inc_return_relaxed(atomic_long_t *v)
{
return arch_atomic_long_inc_return_relaxed(v);
}
static __always_inline long
raw_atomic_long_fetch_inc(atomic_long_t *v)
{
return arch_atomic_long_fetch_inc(v);
}
static __always_inline long
raw_atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
return arch_atomic_long_fetch_inc_acquire(v);
}
static __always_inline long
raw_atomic_long_fetch_inc_release(atomic_long_t *v)
{
return arch_atomic_long_fetch_inc_release(v);
}
static __always_inline long
raw_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
return arch_atomic_long_fetch_inc_relaxed(v);
}
static __always_inline void
raw_atomic_long_dec(atomic_long_t *v)
{
arch_atomic_long_dec(v);
}
static __always_inline long
raw_atomic_long_dec_return(atomic_long_t *v)
{
return arch_atomic_long_dec_return(v);
}
static __always_inline long
raw_atomic_long_dec_return_acquire(atomic_long_t *v)
{
return arch_atomic_long_dec_return_acquire(v);
}
static __always_inline long
raw_atomic_long_dec_return_release(atomic_long_t *v)
{
return arch_atomic_long_dec_return_release(v);
}
static __always_inline long
raw_atomic_long_dec_return_relaxed(atomic_long_t *v)
{
return arch_atomic_long_dec_return_relaxed(v);
}
static __always_inline long
raw_atomic_long_fetch_dec(atomic_long_t *v)
{
return arch_atomic_long_fetch_dec(v);
}
static __always_inline long
raw_atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
return arch_atomic_long_fetch_dec_acquire(v);
}
static __always_inline long
raw_atomic_long_fetch_dec_release(atomic_long_t *v)
{
return arch_atomic_long_fetch_dec_release(v);
}
static __always_inline long
raw_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
return arch_atomic_long_fetch_dec_relaxed(v);
}
static __always_inline void
raw_atomic_long_and(long i, atomic_long_t *v)
{
arch_atomic_long_and(i, v);
}
static __always_inline long
raw_atomic_long_fetch_and(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_and(i, v);
}
static __always_inline long
raw_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_and_acquire(i, v);
}
static __always_inline long
raw_atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_and_release(i, v);
}
static __always_inline long
raw_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_and_relaxed(i, v);
}
static __always_inline void
raw_atomic_long_andnot(long i, atomic_long_t *v)
{
arch_atomic_long_andnot(i, v);
}
static __always_inline long
raw_atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_andnot(i, v);
}
static __always_inline long
raw_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_andnot_acquire(i, v);
}
static __always_inline long
raw_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_andnot_release(i, v);
}
static __always_inline long
raw_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_andnot_relaxed(i, v);
}
static __always_inline void
raw_atomic_long_or(long i, atomic_long_t *v)
{
arch_atomic_long_or(i, v);
}
static __always_inline long
raw_atomic_long_fetch_or(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_or(i, v);
}
static __always_inline long
raw_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_or_acquire(i, v);
}
static __always_inline long
raw_atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_or_release(i, v);
}
static __always_inline long
raw_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_or_relaxed(i, v);
}
static __always_inline void
raw_atomic_long_xor(long i, atomic_long_t *v)
{
arch_atomic_long_xor(i, v);
}
static __always_inline long
raw_atomic_long_fetch_xor(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_xor(i, v);
}
static __always_inline long
raw_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_xor_acquire(i, v);
}
static __always_inline long
raw_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_xor_release(i, v);
}
static __always_inline long
raw_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
return arch_atomic_long_fetch_xor_relaxed(i, v);
}
static __always_inline long
raw_atomic_long_xchg(atomic_long_t *v, long i)
{
return arch_atomic_long_xchg(v, i);
}
static __always_inline long
raw_atomic_long_xchg_acquire(atomic_long_t *v, long i)
{
return arch_atomic_long_xchg_acquire(v, i);
}
static __always_inline long
raw_atomic_long_xchg_release(atomic_long_t *v, long i)
{
return arch_atomic_long_xchg_release(v, i);
}
static __always_inline long
raw_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
{
return arch_atomic_long_xchg_relaxed(v, i);
}
static __always_inline long
raw_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
return arch_atomic_long_cmpxchg(v, old, new);
}
static __always_inline long
raw_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
return arch_atomic_long_cmpxchg_acquire(v, old, new);
}
static __always_inline long
raw_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
return arch_atomic_long_cmpxchg_release(v, old, new);
}
static __always_inline long
raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
return arch_atomic_long_cmpxchg_relaxed(v, old, new);
}
static __always_inline bool
raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
return arch_atomic_long_try_cmpxchg(v, old, new);
}
static __always_inline bool
raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
return arch_atomic_long_try_cmpxchg_acquire(v, old, new);
}
static __always_inline bool
raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
return arch_atomic_long_try_cmpxchg_release(v, old, new);
}
static __always_inline bool
raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
return arch_atomic_long_try_cmpxchg_relaxed(v, old, new);
}
static __always_inline bool
raw_atomic_long_sub_and_test(long i, atomic_long_t *v)
{
return arch_atomic_long_sub_and_test(i, v);
}
static __always_inline bool
raw_atomic_long_dec_and_test(atomic_long_t *v)
{
return arch_atomic_long_dec_and_test(v);
}
static __always_inline bool
raw_atomic_long_inc_and_test(atomic_long_t *v)
{
return arch_atomic_long_inc_and_test(v);
}
static __always_inline bool
raw_atomic_long_add_negative(long i, atomic_long_t *v)
{
return arch_atomic_long_add_negative(i, v);
}
static __always_inline bool
raw_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
{
return arch_atomic_long_add_negative_acquire(i, v);
}
static __always_inline bool
raw_atomic_long_add_negative_release(long i, atomic_long_t *v)
{
return arch_atomic_long_add_negative_release(i, v);
}
static __always_inline bool
raw_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
{
return arch_atomic_long_add_negative_relaxed(i, v);
}
static __always_inline long
raw_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
return arch_atomic_long_fetch_add_unless(v, a, u);
}
static __always_inline bool
raw_atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
return arch_atomic_long_add_unless(v, a, u);
}
static __always_inline bool
raw_atomic_long_inc_not_zero(atomic_long_t *v)
{
return arch_atomic_long_inc_not_zero(v);
}
static __always_inline bool
raw_atomic_long_inc_unless_negative(atomic_long_t *v)
{
return arch_atomic_long_inc_unless_negative(v);
}
static __always_inline bool
raw_atomic_long_dec_unless_positive(atomic_long_t *v)
{
return arch_atomic_long_dec_unless_positive(v);
}
static __always_inline long
raw_atomic_long_dec_if_positive(atomic_long_t *v)
{
return arch_atomic_long_dec_if_positive(v);
}
#define raw_xchg(...) \
	arch_xchg(__VA_ARGS__)
@@ -1642,4 +1132,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
	arch_try_cmpxchg128_local(__VA_ARGS__)

#endif /* _LINUX_ATOMIC_RAW_H */
-// 01d54200571b3857755a07c10074a4fd58cef6b1
+// b23ed4424e85200e200ded094522e1d743b3a5b1

scripts/atomic/gen-atomic-long.sh

@@ -47,9 +47,9 @@ gen_proto_order_variant()
cat <<EOF
static __always_inline ${ret}
-arch_atomic_long_${name}(${params})
+raw_atomic_long_${name}(${params})
{
-	${retstmt}arch_${atomic}_${name}(${argscast});
+	${retstmt}raw_${atomic}_${name}(${argscast});
}
EOF
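For reference, a hypothetical expansion of the updated template for a single op (name=fetch_add). The generator emits one copy per CONFIG_64BIT branch, with ${atomic}/${int} set to atomic64/s64 or atomic/int respectively; any casts produced by ${argscast} are elided here.

/* CONFIG_64BIT branch (atomic=atomic64): */
static __always_inline long
raw_atomic_long_fetch_add(long i, atomic_long_t *v)
{
	return raw_atomic64_fetch_add(i, v);
}

/* !CONFIG_64BIT branch (atomic=atomic): */
static __always_inline long
raw_atomic_long_fetch_add(long i, atomic_long_t *v)
{
	return raw_atomic_fetch_add(i, v);
}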

scripts/atomic/gen-atomic-raw.sh

@@ -63,10 +63,6 @@ grep '^[a-z]' "$1" | while read name meta args; do
	gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
done

-grep '^[a-z]' "$1" | while read name meta args; do
-	gen_proto "${meta}" "${name}" "atomic_long" "long" ${args}
-done
-
for xchg in "xchg" "cmpxchg" "cmpxchg64" "cmpxchg128" "try_cmpxchg" "try_cmpxchg64" "try_cmpxchg128"; do
	for order in "" "_acquire" "_release" "_relaxed"; do
		gen_xchg "${xchg}" "${order}"
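With the atomic_long loop removed, the raw generator now only emits the int- and s64-sized raw_ wrappers plus the xchg()/cmpxchg() macro family. Roughly, what remains in the generated atomic-raw.h looks like the following sketch (abridged, not verbatim):

/* raw_ wrappers for the fixed-size ops only ... */
static __always_inline int
raw_atomic_read(const atomic_t *v)
{
	return arch_atomic_read(v);
}

static __always_inline s64
raw_atomic64_read(const atomic64_t *v)
{
	return arch_atomic64_read(v);
}

/* ... plus the xchg()/cmpxchg() macro wrappers, e.g.: */
#define raw_xchg(...) \
	arch_xchg(__VA_ARGS__)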