include/asm-x86/mutex_64.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Authored by Joe Perches on 2008-03-23 01:02:54 -07:00, committed by Ingo Molnar
parent b2347fad51
commit 2c4e883041
1 changed file with 34 additions and 39 deletions
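The changes below are routine checkpatch style fixes: the `__asm__ __volatile__` spelling becomes `asm volatile`, the stringified `#fail_fn` gains surrounding spaces, asm operands are aligned under the opening parenthesis, and wrapped declarations and continuation backslashes are re-indented; the generated code is unchanged. As a rough standalone illustration of the asm spelling cleanup (fastpath_inc() is a made-up userspace example for x86-64 GCC/Clang, not code from this header):

    #include <stdio.h>

    /* Old spelling that checkpatch flags. */
    #define fastpath_inc_old(p)                             \
    do {                                                    \
            __asm__ __volatile__(                           \
                    "   incq (%0)   \n"                     \
                    : : "r" (p) : "memory");                \
    } while (0)

    /* Cleaned-up spelling: asm volatile, operands under the paren. */
    #define fastpath_inc(p)                         \
    do {                                            \
            asm volatile("incq (%0)\n"              \
                         : : "r" (p) : "memory");   \
    } while (0)

    int main(void)
    {
            unsigned long v = 0;

            fastpath_inc_old(&v);
            fastpath_inc(&v);
            printf("%lu\n", v);     /* prints 2; both forms emit the same code */
            return 0;
    }

Both macros compile to the same instruction; only the source-level style differs, which is what checkpatch checks for.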

include/asm-x86/mutex_64.h

@@ -16,23 +16,21 @@
  *
  * Atomically decrements @v and calls <fail_fn> if the result is negative.
  */
-#define __mutex_fastpath_lock(v, fail_fn) \
-do { \
-        unsigned long dummy; \
-        \
-        typecheck(atomic_t *, v); \
-        typecheck_fn(void (*)(atomic_t *), fail_fn); \
-        \
-        __asm__ __volatile__( \
-                LOCK_PREFIX " decl (%%rdi) \n" \
-                " jns 1f \n" \
-                " call "#fail_fn" \n" \
-                "1:" \
-                \
-                :"=D" (dummy) \
-                : "D" (v) \
-                : "rax", "rsi", "rdx", "rcx", \
-                  "r8", "r9", "r10", "r11", "memory"); \
+#define __mutex_fastpath_lock(v, fail_fn) \
+do { \
+        unsigned long dummy; \
+        \
+        typecheck(atomic_t *, v); \
+        typecheck_fn(void (*)(atomic_t *), fail_fn); \
+        \
+        asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \
+                     " jns 1f \n" \
+                     " call " #fail_fn "\n" \
+                     "1:" \
+                     : "=D" (dummy) \
+                     : "D" (v) \
+                     : "rax", "rsi", "rdx", "rcx", \
+                       "r8", "r9", "r10", "r11", "memory"); \
 } while (0)
 
 /**
@@ -45,9 +43,8 @@ do { \
  * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
  * or anything the slow path function returns
  */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count,
-                             int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+                                               int (*fail_fn)(atomic_t *))
 {
         if (unlikely(atomic_dec_return(count) < 0))
                 return fail_fn(count);
@@ -62,23 +59,21 @@ __mutex_fastpath_lock_retval(atomic_t *count,
  *
  * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
  */
-#define __mutex_fastpath_unlock(v, fail_fn) \
-do { \
-        unsigned long dummy; \
-        \
-        typecheck(atomic_t *, v); \
-        typecheck_fn(void (*)(atomic_t *), fail_fn); \
-        \
-        __asm__ __volatile__( \
-                LOCK_PREFIX " incl (%%rdi) \n" \
-                " jg 1f \n" \
-                " call "#fail_fn" \n" \
-                "1: " \
-                \
-                :"=D" (dummy) \
-                : "D" (v) \
-                : "rax", "rsi", "rdx", "rcx", \
-                  "r8", "r9", "r10", "r11", "memory"); \
+#define __mutex_fastpath_unlock(v, fail_fn) \
+do { \
+        unsigned long dummy; \
+        \
+        typecheck(atomic_t *, v); \
+        typecheck_fn(void (*)(atomic_t *), fail_fn); \
+        \
+        asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \
+                     " jg 1f\n" \
+                     " call " #fail_fn "\n" \
+                     "1:" \
+                     : "=D" (dummy) \
+                     : "D" (v) \
+                     : "rax", "rsi", "rdx", "rcx", \
+                       "r8", "r9", "r10", "r11", "memory"); \
 } while (0)
 
 #define __mutex_slowpath_needs_to_unlock() 1
@@ -93,8 +88,8 @@ do { \
  * if it wasn't 1 originally. [the fallback function is never used on
  * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
  */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+                                           int (*fail_fn)(atomic_t *))
 {
         if (likely(atomic_cmpxchg(count, 1, 0) == 1))
                 return 1;
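
For context on what these macros implement, the header's own comments describe the fastpath as: atomically decrement the count, and if it went negative the lock is contended, so call the slow-path function. A rough userspace analogue using C11 atomics (my_fastpath_lock_retval() and the stub slowpath() are illustrative only, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Stub slow path: in the kernel this would block the caller. */
    static int slowpath(atomic_int *count)
    {
            (void)count;
            printf("contended: falling back to the slow path\n");
            return 0;
    }

    /* Analogue of __mutex_fastpath_lock_retval(): atomically decrement
     * the count and punt to fail_fn if the result went negative. */
    static int my_fastpath_lock_retval(atomic_int *count,
                                       int (*fail_fn)(atomic_int *))
    {
            if (atomic_fetch_sub(count, 1) - 1 < 0)
                    return fail_fn(count);
            return 0;
    }

    int main(void)
    {
            atomic_int count = 1;   /* 1 == unlocked, as in the kernel mutex */

            my_fastpath_lock_retval(&count, slowpath);  /* fast path */
            my_fastpath_lock_retval(&count, slowpath);  /* slow path */
            return 0;
    }

The first call finds the count at 1 and succeeds on the fast path; the second sees it drop below zero and falls back to slowpath().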