locking/atomic/x86: Use atomic_try_cmpxchg()

Better code generation:

      text           data  bss        name
  10665111        4530096  843776     defconfig-build/vmlinux.3
  10655703        4530096  843776     defconfig-build/vmlinux.4

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Peter Zijlstra 2017-03-17 20:44:45 +01:00 committed by Ingo Molnar
parent b78c0d4712
commit e6790e4b5d
2 changed files with 22 additions and 45 deletions

View File

@@ -207,16 +207,12 @@ static inline void atomic_##op(int i, atomic_t *v)	\
 }
 
 #define ATOMIC_FETCH_OP(op, c_op)					\
 static inline int atomic_fetch_##op(int i, atomic_t *v)		\
 {									\
-	int old, val = atomic_read(v);					\
-	for (;;) {							\
-		old = atomic_cmpxchg(v, val, val c_op i);		\
-		if (old == val)						\
-			break;						\
-		val = old;						\
-	}								\
-	return old;							\
+	int val = atomic_read(v);					\
+	do {								\
+	} while (!atomic_try_cmpxchg(v, &val, val c_op i));		\
+	return val;							\
 }
 
 #define ATOMIC_OPS(op, c_op)						\
@@ -242,16 +238,11 @@ ATOMIC_OPS(xor, ^)
  */
 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
+	int c = atomic_read(v);
+	do {
+		if (unlikely(c == u))
+			break;
+	} while (!atomic_try_cmpxchg(v, &c, c + a));
 	return c;
 }
 

View File

@@ -198,17 +198,12 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  */
 static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-	long c, old;
-	c = atomic64_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic64_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c != (u);
+	long c = atomic64_read(v);
+	do {
+		if (unlikely(c == u))
+			return false;
+	} while (!atomic64_try_cmpxchg(v, &c, c + a));
+	return true;
 }
 
 #define atomic64_inc_not_zero(v)  atomic64_add_unless((v), 1, 0)
@@ -222,17 +217,12 @@ static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
  */
 static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-	long c, old, dec;
-	c = atomic64_read(v);
-	for (;;) {
+	long dec, c = atomic64_read(v);
+	do {
 		dec = c - 1;
 		if (unlikely(dec < 0))
 			break;
-		old = atomic64_cmpxchg((v), c, dec);
-		if (likely(old == c))
-			break;
-		c = old;
-	}
+	} while (!atomic64_try_cmpxchg(v, &c, dec));
 	return dec;
 }
 
@@ -248,14 +238,10 @@ static inline void atomic64_##op(long i, atomic64_t *v)	\
 #define ATOMIC64_FETCH_OP(op, c_op)					\
 static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
 {									\
-	long old, val = atomic64_read(v);				\
-	for (;;) {							\
-		old = atomic64_cmpxchg(v, val, val c_op i);		\
-		if (old == val)						\
-			break;						\
-		val = old;						\
-	}								\
-	return old;							\
+	long val = atomic64_read(v);					\
+	do {								\
+	} while (!atomic64_try_cmpxchg(v, &val, val c_op i));		\
+	return val;							\
 }
 
 #define ATOMIC64_OPS(op, c_op)						\