include/asm-x86/atomic_32.h: checkpatch cleanups - formatting only
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3c311febfa
commit 78ff12eec4
--- a/include/asm-x86/atomic_32.h
+++ b/include/asm-x86/atomic_32.h
@@ -15,7 +15,9 @@
  * on us. We need to use _exactly_ the address the user gave us,
  * not some alias that contains the same information.
  */
-typedef struct { int counter; } atomic_t;
+typedef struct {
+	int counter;
+} atomic_t;
 
 #define ATOMIC_INIT(i)	{ (i) }
 
@@ -43,10 +45,9 @@ typedef struct { int counter; } atomic_t;
  *
  * Atomically adds @i to @v.
  */
-static __inline__ void atomic_add(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %1,%0"
-		:"+m" (v->counter)
-		:"ir" (i));
+	asm volatile(LOCK_PREFIX "addl %1,%0"
+		     : "+m" (v->counter)
+		     : "ir" (i));
 }
@@ -58,10 +59,9 @@ static __inline__ void atomic_add(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v.
  */
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %1,%0"
-		:"+m" (v->counter)
-		:"ir" (i));
+	asm volatile(LOCK_PREFIX "subl %1,%0"
+		     : "+m" (v->counter)
+		     : "ir" (i));
 }
@@ -75,12 +75,11 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
+static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %2,%0; sete %1"
-		:"+m" (v->counter), "=qm" (c)
-		:"ir" (i) : "memory");
+	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
+		     : "+m" (v->counter), "=qm" (c)
+		     : "ir" (i) : "memory");
 	return c;
@@ -92,10 +91,9 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
  *
  * Atomically increments @v by 1.
  */
-static __inline__ void atomic_inc(atomic_t *v)
+static inline void atomic_inc(atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0"
-		:"+m" (v->counter));
+	asm volatile(LOCK_PREFIX "incl %0"
+		     : "+m" (v->counter));
 }
 
@@ -105,10 +103,9 @@ static __inline__ void atomic_inc(atomic_t *v)
  *
  * Atomically decrements @v by 1.
  */
-static __inline__ void atomic_dec(atomic_t *v)
+static inline void atomic_dec(atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0"
-		:"+m" (v->counter));
+	asm volatile(LOCK_PREFIX "decl %0"
+		     : "+m" (v->counter));
 }
 
@@ -120,12 +117,11 @@ static __inline__ void atomic_dec(atomic_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
+static inline int atomic_dec_and_test(atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0; sete %1"
-		:"+m" (v->counter), "=qm" (c)
-		: : "memory");
+	asm volatile(LOCK_PREFIX "decl %0; sete %1"
+		     : "+m" (v->counter), "=qm" (c)
+		     : : "memory");
 	return c != 0;
@@ -139,12 +135,11 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
+static inline int atomic_inc_and_test(atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0; sete %1"
-		:"+m" (v->counter), "=qm" (c)
-		: : "memory");
+	asm volatile(LOCK_PREFIX "incl %0; sete %1"
+		     : "+m" (v->counter), "=qm" (c)
+		     : : "memory");
 	return c != 0;
@@ -159,12 +154,11 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
  * if the result is negative, or false when
 * result is greater than or equal to zero.
  */
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
+static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %2,%0; sets %1"
-		:"+m" (v->counter), "=qm" (c)
-		:"ir" (i) : "memory");
+	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
+		     : "+m" (v->counter), "=qm" (c)
+		     : "ir" (i) : "memory");
 	return c;
@@ -177,7 +171,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_add_return(int i, atomic_t *v)
 {
 	int __i;
 #ifdef CONFIG_M386
@@ -187,8 +181,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 #endif
 	/* Modern 486+ processor */
 	__i = i;
-	__asm__ __volatile__(
-		LOCK_PREFIX "xaddl %0, %1"
-		:"+r" (i), "+m" (v->counter)
-		: : "memory");
+	asm volatile(LOCK_PREFIX "xaddl %0, %1"
+		     : "+r" (i), "+m" (v->counter)
+		     : : "memory");
 	return i + __i;
@@ -210,7 +203,7 @@ no_xadd: /* Legacy 386 processor */
  *
  * Atomically subtracts @i from @v and returns @v - @i
  */
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	return atomic_add_return(-i, v);
 }
@@ -227,7 +220,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -249,11 +242,11 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
-	__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
-	: : "r" (~(mask)),"m" (*addr) : "memory")
+	asm volatile(LOCK_PREFIX "andl %0,%1" \
+		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
 
 #define atomic_set_mask(mask, addr) \
-	__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
-	: : "r" (mask),"m" (*addr) : "memory")
+	asm volatile(LOCK_PREFIX "orl %0,%1" \
+		     : : "r" (mask), "m" (*(addr)) : "memory")
 
 /* Atomic operations are already serializing on x86 */
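
For readers outside the kernel tree, here is the post-cleanup style in runnable form. This is a hypothetical user-space sketch, not the kernel's code: it hardcodes the "lock " opcode prefix where the header uses the LOCK_PREFIX macro (which expands to nothing on uniprocessor builds), and it reimplements only three of the operations touched above. It assumes GCC or Clang on 32- or 64-bit x86; build with e.g. "gcc -O2 atomic_demo.c".

/* Hypothetical user-space sketch of the atomic_32.h pattern; illustration only. */
#include <stdio.h>

typedef struct {
	int counter;
} atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

/* Post-checkpatch style: "asm volatile" rather than "__asm__ __volatile__".
 * "lock " stands in for the kernel's LOCK_PREFIX macro. */
static inline void atomic_add(int i, atomic_t *v)
{
	asm volatile("lock " "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;

	/* xaddl exchanges %0 and %1 while adding, so the register
	 * operand comes back holding the counter's old value. */
	asm volatile("lock " "xaddl %0, %1"
		     : "+r" (i), "+m" (v->counter)
		     : : "memory");
	return i + __i;	/* old value + delta == new value */
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	/* sete captures the zero flag set by the locked decrement */
	asm volatile("lock " "decl %0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}

int main(void)
{
	atomic_t refs = ATOMIC_INIT(1);

	atomic_add(1, &refs);					/* 1 -> 2 */
	printf("add_return -> %d\n", atomic_add_return(1, &refs));	/* prints 3 */
	while (!atomic_dec_and_test(&refs))
		;						/* 3 -> 0 */
	printf("counter is now %d\n", refs.counter);		/* prints 0 */
	return 0;
}

The atomic_add_return sketch mirrors the -187,8 hunk above: because xaddl writes the counter's old value back into the register operand, adding the saved delta (__i) to it reproduces the post-add result without a second read of memory.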