asm-generic/atomic: Use __always_inline for fallback wrappers
Use __always_inline for atomic fallback wrappers. When building for size
(CC_OPTIMIZE_FOR_SIZE), some compilers appear to be less inclined to
inline even relatively small static inline functions that are assumed to
be inlinable, such as atomic ops. This can cause problems, for example in
UACCESS regions.

While the fallback wrappers aren't pure wrappers, they are trivial
nonetheless, and the function they wrap should determine the final
inlining policy.

For x86 tinyconfig we observe:
- vmlinux baseline:   1315988
- vmlinux with patch: 1315928 (-60 bytes)

Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marco Elver <elver@google.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
commit 944bc9cca7 (parent c020395b66)
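For illustration, the effect the patch relies on can be reproduced outside the kernel with a minimal, self-contained sketch (not kernel code; the local __always_inline macro assumes the kernel definition boils down to the always_inline function attribute):

#include <stdio.h>

/* Assumed local stand-in for the kernel's __always_inline macro. */
#define __always_inline inline __attribute__((__always_inline__))

/* Stand-in for the real, arch-provided operation. */
static __always_inline int add_return(int i, int *p)
{
	return *p += i;
}

/* Fallback-style wrapper: pure plumbing, so it should never be outlined,
 * even when the compiler optimizes for size (-Os). */
static __always_inline int inc_return(int *p)
{
	return add_return(1, p);
}

int main(void)
{
	int x = 41;
	printf("%d\n", inc_return(&x));	/* prints 42 */
	return 0;
}

Built with -Os, the wrapper cannot survive as an out-of-line function (no local inc_return symbol remains in the object file); declared with plain static inline it may, which is exactly the compiler behaviour under CC_OPTIMIZE_FOR_SIZE that the patch guards against.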
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}${name}${sfx}_acquire(${params})
 {
 	${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
@@ -8,7 +8,7 @@ cat <<EOF
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_add_negative(${int} i, ${atomic}_t *v)
 {
 	return ${atomic}_add_return(i, v) < 0;
@@ -8,7 +8,7 @@ cat << EOF
  * Atomically adds @a to @v, if @v was not already @u.
  * Returns true if the addition was done.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
 	return ${atomic}_fetch_add_unless(v, a, u) != u;
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
 {
 	${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
 {
 	${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v);
@@ -7,7 +7,7 @@ cat <<EOF
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_dec_and_test(${atomic}_t *v)
 {
 	return ${atomic}_dec_return(v) == 0;
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_dec_if_positive(${atomic}_t *v)
 {
 	${int} dec, c = ${atomic}_read(v);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline bool
+static __always_inline bool
 ${atomic}_dec_unless_positive(${atomic}_t *v)
 {
 	${int} c = ${atomic}_read(v);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}${name}${sfx}(${params})
 {
 	${ret} ret;
@@ -8,7 +8,7 @@ cat << EOF
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns original value of @v
  */
-static inline ${int}
+static __always_inline ${int}
 ${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
 	${int} c = ${atomic}_read(v);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
 {
 	${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v);
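For concreteness, a hedged sketch of what the inc template above expands to in the base case, assuming ${atomic}=atomic, ${ret}=void and empty ${pfx}/${sfx}/${order}/${retstmt}; atomic_t and atomic_add() are userspace stand-ins here, not the kernel definitions:

#include <stdatomic.h>

#define __always_inline inline __attribute__((__always_inline__))

typedef struct { atomic_int counter; } atomic_t;	/* stand-in */

static __always_inline void atomic_add(int i, atomic_t *v)	/* stand-in */
{
	atomic_fetch_add_explicit(&v->counter, i, memory_order_relaxed);
}

/* The generated fallback: a trivial forwarder, now forced inline. */
static __always_inline void
atomic_inc(atomic_t *v)
{
	atomic_add(1, v);
}

int main(void)
{
	atomic_t v = { .counter = 41 };
	atomic_inc(&v);
	return v.counter == 42 ? 0 : 1;
}

The same pattern applies to every template in this series: the wrapper body is a single statement, so the wrapped operation, not the wrapper, should decide the final inlining policy.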
@@ -7,7 +7,7 @@ cat <<EOF
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_inc_and_test(${atomic}_t *v)
 {
 	return ${atomic}_inc_return(v) == 0;
@@ -6,7 +6,7 @@ cat <<EOF
  * Atomically increments @v by 1, if @v is non-zero.
  * Returns true if the increment was done.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_inc_not_zero(${atomic}_t *v)
 {
 	return ${atomic}_add_unless(v, 1, 0);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline bool
+static __always_inline bool
 ${atomic}_inc_unless_negative(${atomic}_t *v)
 {
 	${int} c = ${atomic}_read(v);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_read_acquire(const ${atomic}_t *v)
 {
 	return smp_load_acquire(&(v)->counter);
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
 ${atomic}_${pfx}${name}${sfx}_release(${params})
 {
 	__atomic_release_fence();
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline void
+static __always_inline void
 ${atomic}_set_release(${atomic}_t *v, ${int} i)
 {
 	smp_store_release(&(v)->counter, i);
@@ -8,7 +8,7 @@ cat <<EOF
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline bool
+static __always_inline bool
 ${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
 {
 	return ${atomic}_sub_return(i, v) == 0;
@@ -1,5 +1,5 @@
 cat <<EOF
-static inline bool
+static __always_inline bool
 ${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
 {
 	${int} r, o = *old;
@@ -149,6 +149,8 @@ cat << EOF
 #ifndef _LINUX_ATOMIC_FALLBACK_H
 #define _LINUX_ATOMIC_FALLBACK_H

+#include <linux/compiler.h>
+
 EOF

 for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
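Putting the last hunk in context: with the template change, every emitted wrapper is declared __always_inline, so the generator now also emits an include of linux/compiler.h, which is expected to pull in that definition, right after the header guard. A hedged sketch of how the top of the generated fallback header would look after this change (the real file is produced by the script, carries its own generated-file banner, and the closing #endif is assumed here):

#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H

#include <linux/compiler.h>	/* assumed to provide __always_inline */

/* ... generated xchg()/cmpxchg() wrappers and atomic fallbacks ... */

#endif /* _LINUX_ATOMIC_FALLBACK_H */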