[ARM] 3311/1: clean up include/asm-arm/mutex.h

Patch from Nicolas Pitre

Since:

	if (unlikely(__res || __ex_flag))

produces worse code on ARM than:

	if (unlikely(__res | __ex_flag))

I therefore made it more explicit:

	__res |= __ex_flag;
	if (unlikely(__res != 0))

so it is not mistaken for a typo again.
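
For illustration (a standalone sketch, not part of the patch, with
hypothetical helper names): gcc must preserve the short-circuit
semantics of ||, which can cost a separate test and branch per
operand, while | lets both flags fold into a single flag-setting OR
followed by one conditional branch:

	/* Sketch only -- helper names are made up. */
	static inline int needs_slowpath_logical(int res, int ex_flag)
	{
		return res || ex_flag;	/* short-circuit: may test twice */
	}

	static inline int needs_slowpath_bitwise(int res, int ex_flag)
	{
		res |= ex_flag;		/* single ORRS on ARM */
		return res != 0;	/* one conditional branch */
	}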

Also made everything static inline functions rather than macros for better
readability (both produce the same code after all).
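
As a rough sketch of the readability difference (hypothetical helpers,
not from the patch): the macro form needs typecheck()/typecheck_fn()
bolted on to get any argument checking, while a static inline gets the
same checking from its prototype for free:

	#include <linux/kernel.h>	/* typecheck() */
	#include <asm/atomic.h>

	/* Macro form: type safety must be added by hand. */
	#define example_dec_macro(count)			\
	do {							\
		typecheck(atomic_t *, count);			\
		atomic_dec(count);				\
	} while (0)

	/* Inline form: the prototype enforces the argument type,
	 * and gcc emits identical code once the call is inlined. */
	static inline void example_dec_inline(atomic_t *count)
	{
		atomic_dec(count);
	}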

And finally added the missing \t's to the multi-line assembly code.
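
The \t matters because gcc pastes each string literal verbatim into
the generated .s file; after a bare \n the next instruction lands in
column 0 of the listing. A minimal sketch (hypothetical function, not
from the patch):

	/* With "\n" alone, the add below would appear unindented in
	 * the compiler's assembly output; "\n\t" keeps the listing
	 * aligned like normal compiler-generated code. */
	static inline int listing_demo(void)
	{
		int ret;

		__asm__ ("mov	%0, #1		\n\t"
			 "add	%0, %0, #1	"
			 : "=r" (ret));
		return ret;
	}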

Signed-off-by: Nicolas Pitre <nico@cam.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 include/asm-arm/mutex.h | 129 ++++++++++++++++++++-------------------
 1 file changed, 64 insertions(+), 65 deletions(-)

--- a/include/asm-arm/mutex.h
+++ b/include/asm-arm/mutex.h
@@ -23,72 +23,71 @@
  * simply bail out immediately through the slow path where the lock will be
  * reattempted until it succeeds.
  */
-#define __mutex_fastpath_lock(count, fail_fn)				\
-do {									\
-	int __ex_flag, __res;						\
-									\
-	typecheck(atomic_t *, count);					\
-	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
-									\
-	__asm__ (							\
-		"ldrex	%0, [%2]	\n"				\
-		"sub	%0, %0, #1	\n"				\
-		"strex	%1, %0, [%2]	\n"				\
-									\
-		: "=&r" (__res), "=&r" (__ex_flag)			\
-		: "r" (&(count)->counter)				\
-		: "cc","memory" );					\
-									\
-	if (unlikely(__res || __ex_flag))				\
-		fail_fn(count);						\
-} while (0)
+static inline void
+__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res;
 
-#define __mutex_fastpath_lock_retval(count, fail_fn)			\
-({									\
-	int __ex_flag, __res;						\
-									\
-	typecheck(atomic_t *, count);					\
-	typecheck_fn(fastcall int (*)(atomic_t *), fail_fn);		\
-									\
-	__asm__ (							\
-		"ldrex	%0, [%2]	\n"				\
-		"sub	%0, %0, #1	\n"				\
-		"strex	%1, %0, [%2]	\n"				\
-									\
-		: "=&r" (__res), "=&r" (__ex_flag)			\
-		: "r" (&(count)->counter)				\
-		: "cc","memory" );					\
-									\
-	__res |= __ex_flag;						\
-	if (unlikely(__res != 0))					\
-		__res = fail_fn(count);					\
-	__res;								\
-})
+	__asm__ (
+
+		"ldrex	%0, [%2]	\n\t"
+		"sub	%0, %0, #1	\n\t"
+		"strex	%1, %0, [%2]	"
+
+		: "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&(count)->counter)
+		: "cc","memory" );
+
+	__res |= __ex_flag;
+	if (unlikely(__res != 0))
+		fail_fn(count);
+}
+
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res;
+
+	__asm__ (
+
+		"ldrex	%0, [%2]	\n\t"
+		"sub	%0, %0, #1	\n\t"
+		"strex	%1, %0, [%2]	"
+
+		: "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&(count)->counter)
+		: "cc","memory" );
+
+	__res |= __ex_flag;
+	if (unlikely(__res != 0))
+		__res = fail_fn(count);
+	return __res;
+}
 
 /*
  * Same trick is used for the unlock fast path. However the original value,
  * rather than the result, is used to test for success in order to have
  * better generated assembly.
  */
-#define __mutex_fastpath_unlock(count, fail_fn)				\
-do {									\
-	int __ex_flag, __res, __orig;					\
-									\
-	typecheck(atomic_t *, count);					\
-	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
-									\
-	__asm__ (							\
-		"ldrex	%0, [%3]	\n"				\
-		"add	%1, %0, #1	\n"				\
-		"strex	%2, %1, [%3]	\n"				\
-									\
-		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)	\
-		: "r" (&(count)->counter)				\
-		: "cc","memory" );					\
-									\
-	if (unlikely(__orig || __ex_flag))				\
-		fail_fn(count);						\
-} while (0)
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+	int __ex_flag, __res, __orig;
+
+	__asm__ (
+
+		"ldrex	%0, [%3]	\n\t"
+		"add	%1, %0, #1	\n\t"
+		"strex	%2, %1, [%3]	"
+
+		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+		: "r" (&(count)->counter)
+		: "cc","memory" );
+
+	__orig |= __ex_flag;
+	if (unlikely(__orig != 0))
+		fail_fn(count);
+}
 
 /*
  * If the unlock was done on a contended lock, or if the unlock simply fails
@@ -110,12 +109,12 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 	__asm__ (
 
-		"1: ldrex	%0, [%3]	\n"
-		"subs	%1, %0, #1	\n"
-		"strexeq	%2, %1, [%3]	\n"
-		"movlt	%0, #0	\n"
-		"cmpeq	%2, #0	\n"
-		"bgt	1b	\n"
+		"1: ldrex	%0, [%3]	\n\t"
+		"subs	%1, %0, #1	\n\t"
+		"strexeq	%2, %1, [%3]	\n\t"
+		"movlt	%0, #0	\n\t"
+		"cmpeq	%2, #0	\n\t"
+		"bgt	1b	"
 
 		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
 		: "r" (&count->counter)