locking/atomic, s390: Use s64 for atomic64
As a step towards making the atomic64 API use consistent types treewide,
let's have the s390 atomic64 implementation use s64 as the underlying type
for atomic64_t, rather than long, matching the generated headers.

As atomic64_read() depends on the generic definition of atomic64_t, this
still returns long. This will be converted in a subsequent patch.

The s390-internal __atomic64_*() ops are also used by the s390 bitops, and
expect pointers to long. Since atomic64_t::counter will be converted to s64
in a subsequent patch, pointers to it are explicitly cast to pointers to
long when passed to __atomic64_*() ops.

Otherwise, there should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aou@eecs.berkeley.edu
Cc: arnd@arndb.de
Cc: bp@alien8.de
Cc: catalin.marinas@arm.com
Cc: davem@davemloft.net
Cc: fenghua.yu@intel.com
Cc: herbert@gondor.apana.org.au
Cc: ink@jurassic.park.msu.ru
Cc: jhogan@kernel.org
Cc: linux@armlinux.org.uk
Cc: mattst88@gmail.com
Cc: mpe@ellerman.id.au
Cc: palmer@sifive.com
Cc: paul.burton@mips.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: rth@twiddle.net
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-14-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0754211847
commit 0ca9480076
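Before the diff, a word on why the casts below are needed: the s390-internal __atomic64_*() helpers (shared with the s390 bitops) take pointers to long, while atomic64_t::counter is about to become s64. The following is a minimal userspace sketch of that shape; the helper body is an assumed stand-in for the real inline asm, and note the kernel builds with -fno-strict-aliasing, which is what makes the cast-and-dereference pattern safe.

#include <stdio.h>

typedef long long s64;			/* kernel-style 64-bit signed type */

typedef struct { s64 counter; } atomic64_t;

/* Assumed shape of the s390-internal helper: takes a long *, returns the
 * old value. The body here is a plain stand-in for the real inline asm. */
static inline long __atomic64_add_barrier(long i, long *ptr)
{
	long old = *ptr;
	*ptr += i;
	return old;
}

static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	/* &v->counter is an s64 *; the helper wants a long *. Both are
	 * 64 bits wide on s390, but they are distinct C types, hence the
	 * explicit cast. */
	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}

int main(void)
{
	atomic64_t v = { .counter = 40 };

	printf("%lld\n", atomic64_add_return(2, &v));	/* prints 42 */
	return 0;
}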
@@ -84,9 +84,9 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 
 #define ATOMIC64_INIT(i)  { (i) }
 
-static inline long atomic64_read(const atomic64_t *v)
+static inline s64 atomic64_read(const atomic64_t *v)
 {
-	long c;
+	s64 c;
 
 	asm volatile(
 		"	lg	%0,%1\n"
@@ -94,49 +94,49 @@ static inline long atomic64_read(const atomic64_t *v)
 	return c;
 }
 
-static inline void atomic64_set(atomic64_t *v, long i)
+static inline void atomic64_set(atomic64_t *v, s64 i)
 {
 	asm volatile(
 		"	stg	%1,%0\n"
 		: "=Q" (v->counter) : "d" (i));
 }
 
-static inline long atomic64_add_return(long i, atomic64_t *v)
+static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
 {
-	return __atomic64_add_barrier(i, &v->counter) + i;
+	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
 }
 
-static inline long atomic64_fetch_add(long i, atomic64_t *v)
+static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
 {
-	return __atomic64_add_barrier(i, &v->counter);
+	return __atomic64_add_barrier(i, (long *)&v->counter);
 }
 
-static inline void atomic64_add(long i, atomic64_t *v)
+static inline void atomic64_add(s64 i, atomic64_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		__atomic64_add_const(i, &v->counter);
+		__atomic64_add_const(i, (long *)&v->counter);
 		return;
 	}
 #endif
-	__atomic64_add(i, &v->counter);
+	__atomic64_add(i, (long *)&v->counter);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
-	return __atomic64_cmpxchg(&v->counter, old, new);
+	return __atomic64_cmpxchg((long *)&v->counter, old, new);
 }
 
 #define ATOMIC64_OPS(op)						\
-static inline void atomic64_##op(long i, atomic64_t *v)		\
+static inline void atomic64_##op(s64 i, atomic64_t *v)			\
 {									\
-	__atomic64_##op(i, &v->counter);				\
+	__atomic64_##op(i, (long *)&v->counter);			\
 }									\
-static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
+static inline long atomic64_fetch_##op(s64 i, atomic64_t *v)		\
 {									\
-	return __atomic64_##op##_barrier(i, &v->counter);		\
+	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
 }
 
 ATOMIC64_OPS(and)
@@ -145,8 +145,8 @@ ATOMIC64_OPS(xor)
 
 #undef ATOMIC64_OPS
 
-#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long)(_i), _v)
-#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long)(_i), _v)
-#define atomic64_sub(_i, _v)		atomic64_add(-(long)(_i), _v)
+#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(s64)(_i), _v)
+#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(s64)(_i), _v)
+#define atomic64_sub(_i, _v)		atomic64_add(-(s64)(_i), _v)
 
 #endif /* __ARCH_S390_ATOMIC__ */
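For reference, with this change applied, ATOMIC64_OPS(and) expands to the following (whitespace adjusted). Note that atomic64_fetch_and() keeps its long return type in this patch, as visible in the diff above:

static inline void atomic64_and(s64 i, atomic64_t *v)
{
	__atomic64_and(i, (long *)&v->counter);
}
static inline long atomic64_fetch_and(s64 i, atomic64_t *v)
{
	return __atomic64_and_barrier(i, (long *)&v->counter);
}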