locking/atomic, alpha: Use s64 for atomic64
As a step towards making the atomic64 API use consistent types treewide, let's have the alpha atomic64 implementation use s64 as the underlying type for atomic64_t, rather than long, matching the generated headers.

As atomic64_read() depends on the generic definition of atomic64_t, this still returns long. This will be converted in a subsequent patch.

Otherwise, there should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aou@eecs.berkeley.edu
Cc: arnd@arndb.de
Cc: bp@alien8.de
Cc: catalin.marinas@arm.com
Cc: davem@davemloft.net
Cc: fenghua.yu@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: jhogan@kernel.org
Cc: linux@armlinux.org.uk
Cc: mpe@ellerman.id.au
Cc: palmer@sifive.com
Cc: paul.burton@mips.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-5-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 0203fdc160
parent 9255813d58
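An editor's aside, not part of the patch: the reason this retyping is not a functional change is that s64 and long are both 64 bits wide on alpha, and the generic headers already define atomic64_t over s64. A minimal sketch of that relationship, assuming the kernel's asm-generic/int-ll64.h typing:

/* Sketch only: why long -> s64 is size-neutral here. In-kernel, s64
 * is "long long" (asm-generic/int-ll64.h); on the 64-bit alpha ABI,
 * plain "long" is also 64 bits wide. */
typedef long long s64;		/* stand-in for <linux/types.h> */

typedef struct {
	s64 counter;		/* shape of the generic atomic64_t */
} atomic64_t;

_Static_assert(sizeof(s64) == sizeof(long),
	       "s64 and long have the same width on 64-bit targets");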
@@ -93,9 +93,9 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 }
 
 #define ATOMIC64_OP(op, asm_op)					\
-static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
+static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		\
 {									\
-	unsigned long temp;						\
+	s64 temp;							\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
 	"	" #asm_op " %0,%2,%0\n"					\
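An aside, not part of the diff: the hunk's context window cuts the asm body short. For orientation, here is roughly what ATOMIC64_OP(add, addq) expands to after this change; the stq_c/retry tail and the constraint list are reconstructed from the usual alpha LL/SC pattern rather than from this hunk, so treat them as an assumption:

/* Sketch: approximate expansion of ATOMIC64_OP(add, addq) after this
 * patch. Only the first asm lines appear in the hunk above; the
 * ldq_l/stq_c retry loop below is the standard alpha LL/SC idiom. */
static __inline__ void atomic64_add(s64 i, atomic64_t *v)
{
	s64 temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"		/* load-locked v->counter */
	"	addq %0,%2,%0\n"	/* temp += i */
	"	stq_c %0,%1\n"		/* store-conditional */
	"	beq %0,2f\n"		/* retry if the store lost the lock */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	: "=&r" (temp), "=m" (v->counter)
	: "Ir" (i), "m" (v->counter));
}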
@@ -109,9 +109,9 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
 }									\
 
 #define ATOMIC64_OP_RETURN(op, asm_op)					\
-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
+static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)	\
 {									\
-	long temp, result;						\
+	s64 temp, result;						\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
 	"	" #asm_op " %0,%3,%2\n"					\
@@ -128,9 +128,9 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
 }
 
 #define ATOMIC64_FETCH_OP(op, asm_op)					\
-static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)	\
+static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)	\
 {									\
-	long temp, result;						\
+	s64 temp, result;						\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %2,%1\n"						\
 	"	" #asm_op " %2,%3,%0\n"					\
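An aside, not part of the diff: ATOMIC64_OP_RETURN and ATOMIC64_FETCH_OP differ only in which value they hand back, which is why the ldq_l destination differs (%0 vs %2) between the two hunks above. A sketch of the intended semantics as plain, non-atomic C (illustrative only; the real code keeps the LL/SC loop):

/* Sketch of the semantic difference between the two macro families. */
static inline s64 sketch_add_return(s64 i, s64 *ctr)
{
	*ctr += i;
	return *ctr;		/* _return_relaxed: the new value */
}

static inline s64 sketch_fetch_add(s64 i, s64 *ctr)
{
	s64 old = *ctr;
	*ctr += i;
	return old;		/* fetch_*_relaxed: the old value */
}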
@@ -246,9 +246,9 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long c, new, old;
+	s64 c, new, old;
 	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %[old],%[mem]\n"
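An aside, not part of the diff: a small hypothetical caller showing the contract the comment above describes; the name grab_ref is illustrative only:

/* Hypothetical caller: take a reference only if the count is not
 * already zero. atomic64_fetch_add_unless() returns the old value,
 * so comparing it against the "unless" value (0) tells us whether
 * the add actually happened. */
static bool grab_ref(atomic64_t *refcount)
{
	return atomic64_fetch_add_unless(refcount, 1, 0) != 0;
}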
@@ -276,9 +276,9 @@ static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
  * The function returns the old value of *v minus 1, even if
  * the atomic variable, v, was not decremented.
  */
-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-	long old, tmp;
+	s64 old, tmp;
 	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %[old],%[mem]\n"
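An aside, not part of the diff: the dec_if_positive contract quoted in the hunk's comment, in a small hypothetical caller; the name take_slot is illustrative only:

/* Hypothetical caller: claim a resource slot only while slots remain.
 * atomic64_dec_if_positive() returns old - 1 even when it does not
 * decrement, so a negative result means the count was already <= 0. */
static bool take_slot(atomic64_t *slots)
{
	return atomic64_dec_if_positive(slots) >= 0;
}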