atomics: Make conditional ops return 'bool'
Some of the atomics return a status value, which is a boolean value
describing whether the operation was performed. To make it clear that
this is a boolean value, let's update the common fallbacks to return
bool, fixing up the return values and comments likewise.

At the same time, let's simplify the description of the operations in
their respective comments.

The instrumented atomics and generic atomic64 implementation are
updated accordingly.

Note that atomic64_dec_if_positive() doesn't follow the usual test op
pattern, and returns the would-be decremented value. This is not
changed.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-5-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit ade5ef9280 (parent f74445b6dd)
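For context, here is a hedged userspace C11 sketch (not part of this
patch) of the caller-side pattern the boolean return serves: take a
reference only if the object is still live. inc_not_zero() below is a
local stand-in for the kernel's atomic_inc_not_zero().

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Local stand-in for the kernel's atomic_inc_not_zero(): increment
     * the counter only if it is non-zero, and report whether we did. */
    static bool inc_not_zero(atomic_int *v)
    {
            int old = atomic_load(v);

            while (old != 0) {
                    /* On failure, compare_exchange writes the observed
                     * value back into 'old', so the retry sees fresh
                     * state. */
                    if (atomic_compare_exchange_weak(v, &old, old + 1))
                            return true;
            }
            return false;
    }

    int main(void)
    {
            atomic_int refs = 1;

            if (inc_not_zero(&refs))        /* reads naturally as a bool */
                    printf("got a reference, refs = %d\n", atomic_load(&refs));

            atomic_store(&refs, 0);         /* object is now dead */
            if (!inc_not_zero(&refs))
                    printf("object dead, lookup fails cleanly\n");

            return 0;
    }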
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -205,7 +205,7 @@ static __always_inline s64 atomic64_dec_return(atomic64_t *v)
 	return arch_atomic64_dec_return(v);
 }
 
-static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v)
+static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic64_inc_not_zero(v);
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -11,6 +11,7 @@
  */
 #ifndef _ASM_GENERIC_ATOMIC64_H
 #define _ASM_GENERIC_ATOMIC64_H
+#include <linux/types.h>
 
 typedef struct {
 	long long counter;
@@ -52,7 +53,7 @@ ATOMIC64_OPS(xor)
 extern long long atomic64_dec_if_positive(atomic64_t *v);
 extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
 extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
+extern bool atomic64_add_unless(atomic64_t *v, long long a, long long u);
 
 #define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v)			atomic64_add(1LL, (v))
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,8 @@
 /* Atomic operations usable in machine independent code */
 #ifndef _LINUX_ATOMIC_H
 #define _LINUX_ATOMIC_H
+#include <linux/types.h>
+
 #include <asm/atomic.h>
 #include <asm/barrier.h>
 
@@ -525,10 +527,10 @@
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
  */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline bool atomic_add_unless(atomic_t *v, int a, int u)
 {
 	return atomic_fetch_add_unless(v, a, u) != u;
 }
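The fallback above derives the boolean from the fetch value:
atomic_fetch_add_unless() returns the old value, which equals @u
exactly when no addition happened. As a minimal sketch, the same
derivation in userspace C11, with local stand-ins for the kernel
helpers:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Local stand-in for the kernel's atomic_fetch_add_unless(): add
     * 'a' unless the value is 'u', returning the old value either way. */
    static int fetch_add_unless(atomic_int *v, int a, int u)
    {
            int old = atomic_load(v);

            do {
                    if (old == u)
                            break;
            } while (!atomic_compare_exchange_weak(v, &old, old + a));

            return old;
    }

    /* The bool form falls out of the fetch form, as in the patch:
     * the addition was done iff the old value was not 'u'. */
    static bool add_unless(atomic_int *v, int a, int u)
    {
            return fetch_add_unless(v, a, u) != u;
    }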
@@ -537,8 +539,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
  * atomic_inc_not_zero - increment unless the number is zero
  * @v: pointer of type atomic_t
  *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
  */
 #ifndef atomic_inc_not_zero
 #define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
@@ -572,28 +574,28 @@ static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
 #endif
 
 #ifndef atomic_inc_unless_negative
-static inline int atomic_inc_unless_negative(atomic_t *p)
+static inline bool atomic_inc_unless_negative(atomic_t *p)
 {
 	int v, v1;
 	for (v = 0; v >= 0; v = v1) {
 		v1 = atomic_cmpxchg(p, v, v + 1);
 		if (likely(v1 == v))
-			return 1;
+			return true;
 	}
-	return 0;
+	return false;
 }
 #endif
 
 #ifndef atomic_dec_unless_positive
-static inline int atomic_dec_unless_positive(atomic_t *p)
+static inline bool atomic_dec_unless_positive(atomic_t *p)
 {
 	int v, v1;
 	for (v = 0; v <= 0; v = v1) {
 		v1 = atomic_cmpxchg(p, v, v - 1);
 		if (likely(v1 == v))
-			return 1;
+			return true;
 	}
-	return 0;
+	return false;
 }
 #endif
 
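These fallbacks open-code a retry loop around atomic_cmpxchg(), which
returns the value it found; success is detected by comparing that
against the expected value. A hedged userspace rendering of the same
loop shape for atomic_inc_unless_negative(), using C11's
compare-exchange (which instead reports success directly and writes
back the observed value):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Userspace sketch of the atomic_inc_unless_negative() fallback. */
    static bool inc_unless_negative(atomic_int *p)
    {
            /* Guess 0 first; each failed compare-exchange reports the
             * value it actually found, which becomes the next guess,
             * until the value is seen to be negative. */
            for (int v = 0; v >= 0;) {
                    int expected = v;

                    if (atomic_compare_exchange_strong(p, &expected, v + 1))
                            return true;
                    v = expected;
            }
            return false;
    }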
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -178,16 +178,16 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 }
 EXPORT_SYMBOL(atomic64_xchg);
 
-int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	int ret = 0;
+	bool ret = false;
 
 	raw_spin_lock_irqsave(lock, flags);
 	if (v->counter != u) {
 		v->counter += a;
-		ret = 1;
+		ret = true;
 	}
 	raw_spin_unlock_irqrestore(lock, flags);
 	return ret;
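The generic atomic64 implementation backs atomic64_t with a spinlock
on architectures without native 64-bit atomics, so here the conversion
is a plain local-variable change. A rough pthread-based userspace
analogue of the same shape (lock, test, conditionally add, report),
for illustration only:

    #include <pthread.h>
    #include <stdbool.h>

    /* Userspace analogue of the lock-backed generic atomic64_t;
     * initialise 'lock' with PTHREAD_MUTEX_INITIALIZER. */
    struct locked64 {
            pthread_mutex_t lock;
            long long counter;
    };

    static bool locked64_add_unless(struct locked64 *v, long long a,
                                    long long u)
    {
            bool ret = false;

            pthread_mutex_lock(&v->lock);
            if (v->counter != u) {
                    v->counter += a;
                    ret = true;     /* the addition was done */
            }
            pthread_mutex_unlock(&v->lock);

            return ret;
    }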