/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */
|
|
|
|
|
|
|
|
#ifndef _ASM_TILE_ATOMIC_32_H
|
|
|
|
#define _ASM_TILE_ATOMIC_32_H
|
|
|
|
|
2012-03-29 01:30:03 +08:00
|
|
|
#include <asm/barrier.h>
|
2010-05-29 11:09:12 +08:00
|
|
|
#include <arch/chip.h>
|
|
|
|
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
|
|
|
|
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	/*
	 * No return value, so unlike atomic_add_return() below there is
	 * no smp_mb() here; the old value from the xchg-add is discarded.
	 */
	_atomic_xchg_add(&v->counter, i);
}
|
|
|
|
|
2016-04-18 07:16:03 +08:00
|
|
|
/*
 * Generate atomic_{and,or,xor}() and atomic_fetch_{and,or,xor}() on top
 * of the out-of-line _atomic_fetch_##op() support routines.  The void
 * variants issue no barrier; the fetch variants issue a full smp_mb()
 * before the operation.  The counter is an int but the low-level
 * routines take an unsigned long pointer, hence the cast.
 */
#define ATOMIC_OPS(op) \
unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
	_atomic_fetch_##op((unsigned long *)&v->counter, i); \
} \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
	smp_mb(); \
	return _atomic_fetch_##op((unsigned long *)&v->counter, i); \
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
|
tile: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.
For tilegx, these are relatively straightforward; the architecture
provides atomic "or" and "and", both 32-bit and 64-bit. To support
xor we provide a loop using "cmpexch".
For the older 32-bit tilepro architecture, we have to extend
the set of low-level assembly routines to include 32-bit "and",
as well as all three 64-bit routines. Somewhat confusingly,
some 32-bit versions are already used by the bitops inlines, with
parameter types appropriate for bitops, so we have to do a bit of
casting to match "int" to "unsigned long".
Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1436474297-32187-1-git-send-email-cmetcalf@ezchip.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2015-07-10 04:38:17 +08:00
|
|
|
|
2016-04-18 07:16:03 +08:00
|
|
|
/*
 * atomic_fetch_add - atomically add, returning the value of @v
 * just before the addition (see _atomic_xchg_add()).
 */
static inline int atomic_fetch_add(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(&v->counter, i);
}
|
tile: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.
For tilegx, these are relatively straightforward; the architecture
provides atomic "or" and "and", both 32-bit and 64-bit. To support
xor we provide a loop using "cmpexch".
For the older 32-bit tilepro architecture, we have to extend
the set of low-level assembly routines to include 32-bit "and",
as well as all three 64-bit routines. Somewhat confusingly,
some 32-bit versions are already used by the bitops inlines, with
parameter types appropriate for bitops, so we have to do a bit of
casting to match "int" to "unsigned long".
Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1436474297-32187-1-git-send-email-cmetcalf@ezchip.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2015-07-10 04:38:17 +08:00
|
|
|
|
2010-05-29 11:09:12 +08:00
|
|
|
/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	/* _atomic_xchg_add() returns the old value; add @i for the new one. */
	return _atomic_xchg_add(&v->counter, i) + i;
}
|
|
|
|
|
|
|
|
/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(&v->counter, a, u);
}
|
|
|
|
|
|
|
|
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	/* Use the atomic exchange routine; the old value is discarded. */
	_atomic_xchg(&v->counter, n);
}
|
|
|
|
|
|
|
|
/*
 * A 64bit atomic type.  The counter is a plain long long; all accesses
 * go through the _atomic64_* support routines used below so that both
 * 32-bit halves are read and written consistently.
 */

typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }
|
|
|
|
|
|
|
|
/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 * Adding zero returns the current value without changing it.
	 */
	return _atomic64_xchg_add((long long *)&v->counter, 0);
}
|
|
|
|
|
|
|
|
/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(long long i, atomic64_t *v)
{
	/* No return value, so no barrier (cf. atomic64_add_return()). */
	_atomic64_xchg_add(&v->counter, i);
}
|
|
|
|
|
2016-04-18 07:16:03 +08:00
|
|
|
/*
 * Generate atomic64_{and,or,xor}() and atomic64_fetch_{and,or,xor}()
 * on top of the out-of-line _atomic64_fetch_##op() support routines.
 * As with the 32-bit variants above, only the value-returning fetch
 * forms issue a full smp_mb() barrier.
 */
#define ATOMIC64_OPS(op) \
long long _atomic64_fetch_##op(long long *v, long long n); \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
	_atomic64_fetch_##op(&v->counter, i); \
} \
static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
{ \
	smp_mb(); \
	return _atomic64_fetch_##op(&v->counter, i); \
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS
|
|
|
|
|
|
|
|
/*
 * atomic64_fetch_add - atomically add, returning the value of @v
 * just before the addition (see _atomic64_xchg_add()).
 */
static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(&v->counter, i);
}
|
|
|
|
|
2010-05-29 11:09:12 +08:00
|
|
|
/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	/* _atomic64_xchg_add() returns the old value; add @i for the new one. */
	return _atomic64_xchg_add(&v->counter, i) + i;
}
|
|
|
|
|
|
|
|
/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
					    long long u)
{
	smp_mb();  /* barrier for proper semantics */
	/* The support routine returns the old value; compare it against @u. */
	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* atomic64_set - set atomic variable
|
|
|
|
* @v: pointer of type atomic64_t
|
|
|
|
* @i: required value
|
|
|
|
*
|
|
|
|
* Atomically sets the value of @v to @i.
|
|
|
|
*
|
|
|
|
* atomic64_set() can't be just a raw store, since it would be lost if it
|
|
|
|
* fell between the load and store of one of the other atomic ops.
|
|
|
|
*/
|
2013-09-25 12:14:08 +08:00
|
|
|
static inline void atomic64_set(atomic64_t *v, long long n)
|
2010-05-29 11:09:12 +08:00
|
|
|
{
|
tile: rework <asm/cmpxchg.h>
The macrology in cmpxchg.h was designed to allow arbitrary pointer
and integer values to be passed through the routines. To support
cmpxchg() on 64-bit values on the 32-bit tilepro architecture, we
used the idiom "(typeof(val))(typeof(val-val))". This way, in the
"size 8" branch of the switch, when the underlying cmpxchg routine
returns a 64-bit quantity, we cast it first to a typeof(val-val)
quantity (i.e. size_t if "val" is a pointer) with no warnings about
casting between pointers and integers of different sizes, then cast
onwards to typeof(val), again with no warnings. If val is not a
pointer type, the additional cast is a no-op. We can't replace the
typeof(val-val) cast with (for example) unsigned long, since then if
"val" is really a 64-bit type, we cast away the high bits.
HOWEVER, this fails with current gcc (through 4.7 at least) if "val"
is a pointer to an incomplete type. Unfortunately gcc isn't smart
enough to realize that "val - val" will always be a size_t type
even if it's an incomplete type pointer.
Accordingly, I've reworked the way we handle the casting. We have
given up the ability to use cmpxchg() on 64-bit values on tilepro,
which is OK in the kernel since we should use cmpxchg64() explicitly
on such values anyway. As a result, I can just use simple "unsigned
long" casts internally.
As I reworked it, I realized it would be cleaner to move the
architecture-specific conditionals for cmpxchg and xchg out of the
atomic.h headers and into cmpxchg, and then use the cmpxchg() and
xchg() primitives directly in atomic.h and elsewhere. This allowed
the cmpxchg.h header to stand on its own without relying on the
implicit include of it that is performed by <asm/atomic.h>.
It also allowed collapsing the atomic_xchg/atomic_cmpxchg routines
from atomic_{32,64}.h into atomic.h.
I improved the tests that guard the allowed size of the arguments
to the routines to use a __compiletime_error() test. (By avoiding
the use of BUILD_BUG, I could include cmpxchg.h into bitops.h as
well and use the macros there, which is otherwise impossible due
to include order dependency issues.)
The tilepro _atomic_xxx internal methods were previously set up to
take atomic_t and atomic64_t arguments, which isn't as convenient
with the new model, so I modified them to take int or u64 arguments,
which is consistent with how they used the arguments internally
anyway, so provided some nice simplification there too.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
2013-09-06 20:56:45 +08:00
|
|
|
_atomic64_xchg(&v->counter, n);
|
2010-05-29 11:09:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * The remaining 64-bit operations are expressed in terms of the
 * add/xchg primitives above: subtraction is addition of the negated
 * argument, and inc/dec are add/sub of the constant 1LL.
 */
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
|
|
|
|
|
|
|
|
#endif /* !__ASSEMBLY__ */
|
|
|
|
|
|
|
|
/*
 * Internal definitions only beyond this point.
 */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)
|
|
|
|
|
|
|
|
#ifndef __ASSEMBLY__
/* Hash table of lock words, ATOMIC_HASH_SIZE entries (see sizing above). */
extern int atomic_locks[];
#endif
|
|
|
|
|
|
|
|
/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock. Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to help
 * assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
/* Register number reserved for the held-lock pointer. */
#define ATOMIC_LOCK_REG 20
/* The same register, spelled as an assembler register name. */
#define ATOMIC_LOCK_REG_NAME r20
|
|
|
|
|
|
|
|
#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);

/* Private helper routines in lib/atomic_asm_32.S */
/*
 * Result record returned by the 32-bit atomic helpers.
 * NOTE(review): field semantics are implemented in lib/atomic_asm_32.S,
 * which is not visible here — presumably val is the pre-operation word
 * and err is the failure status (cf. __atomic_bad_address below); confirm
 * against the assembly source.
 */
struct __get_user {
	unsigned long val;
	int err;
};

/* 32-bit compare-and-exchange / exchange / fetch-op helpers. */
extern struct __get_user __atomic32_cmpxchg(volatile int *p,
					    int *lock, int o, int n);
extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
						    int *lock, int o, int n);
extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);

/* 64-bit helpers; these return the previous 64-bit value directly. */
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
				    long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
				     long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
					    int *lock, long long o, long long n);
extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n);

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);

#endif /* !__ASSEMBLY__ */
|
|
|
|
|
|
|
|
#endif /* _ASM_TILE_ATOMIC_32_H */
|