locking,arch,sh: Fold atomic_ops
Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-sh@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135852.770036493@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit c6470150df
parent af095dd60b
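The change is pure CPP folding: in each header below, the four functions differ only in one operator or mnemonic, so a macro parameterized by the op can stamp out the whole family, and a single ATOMIC_OPS() invocation per operation then generates both the void and the value-returning variant. The following is a minimal stand-alone sketch of the same pattern, assuming a plain C fallback in the style of atomic-irq.h; the my_atomic_t type and the main() harness are illustrative only, and nothing in the sketch is actually atomic:

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;   /* illustrative stand-in for atomic_t */

/* Generate the void variant: my_atomic_add(), my_atomic_sub(), ... */
#define ATOMIC_OP(op, c_op)                                             \
static inline void my_atomic_##op(int i, my_atomic_t *v)                \
{                                                                       \
        v->counter c_op i;      /* the only line that differs per op */ \
}

/* Generate the value-returning variant: my_atomic_add_return(), ... */
#define ATOMIC_OP_RETURN(op, c_op)                                      \
static inline int my_atomic_##op##_return(int i, my_atomic_t *v)        \
{                                                                       \
        v->counter c_op i;                                              \
        return v->counter;                                              \
}

/* One line per operation emits both variants. */
#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

int main(void)
{
        my_atomic_t v = { 40 };

        my_atomic_add(2, &v);                           /* v.counter == 42 */
        printf("%d\n", my_atomic_sub_return(2, &v));    /* prints 40 */
        return 0;
}

With this shape, adding a new op (and, or, xor) later is one ATOMIC_OPS() line per header, which is what the changelog means by easy addition of new ops.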
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -1,85 +1,56 @@
 #ifndef __ASM_SH_ATOMIC_GRB_H
 #define __ASM_SH_ATOMIC_GRB_H
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-        int tmp;
-
-        __asm__ __volatile__ (
-                "   .align 2              \n\t"
-                "   mova    1f,   r0      \n\t" /* r0 = end point */
-                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
-                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
-                "   mov.l  @%1,   %0      \n\t" /* load old value */
-                "   add     %2,   %0      \n\t" /* add */
-                "   mov.l   %0,   @%1     \n\t" /* store new value */
-                "1: mov     r1,   r15     \n\t" /* LOGOUT */
-                : "=&r" (tmp),
-                  "+r"  (v)
-                : "r"   (i)
-                : "memory" , "r0", "r1");
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-        int tmp;
-
-        __asm__ __volatile__ (
-                "   .align 2              \n\t"
-                "   mova    1f,   r0      \n\t" /* r0 = end point */
-                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
-                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
-                "   mov.l  @%1,   %0      \n\t" /* load old value */
-                "   sub     %2,   %0      \n\t" /* sub */
-                "   mov.l   %0,   @%1     \n\t" /* store new value */
-                "1: mov     r1,   r15     \n\t" /* LOGOUT */
-                : "=&r" (tmp),
-                  "+r"  (v)
-                : "r"   (i)
-                : "memory" , "r0", "r1");
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        int tmp;
-
-        __asm__ __volatile__ (
-                "   .align 2              \n\t"
-                "   mova    1f,   r0      \n\t" /* r0 = end point */
-                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
-                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
-                "   mov.l  @%1,   %0      \n\t" /* load old value */
-                "   add     %2,   %0      \n\t" /* add */
-                "   mov.l   %0,   @%1     \n\t" /* store new value */
-                "1: mov     r1,   r15     \n\t" /* LOGOUT */
-                : "=&r" (tmp),
-                  "+r"  (v)
-                : "r"   (i)
-                : "memory" , "r0", "r1");
-
-        return tmp;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        int tmp;
-
-        __asm__ __volatile__ (
-                "   .align 2              \n\t"
-                "   mova    1f,   r0      \n\t" /* r0 = end point */
-                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
-                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
-                "   mov.l  @%1,   %0      \n\t" /* load old value */
-                "   sub     %2,   %0      \n\t" /* sub */
-                "   mov.l   %0,   @%1     \n\t" /* store new value */
-                "1: mov     r1,   r15     \n\t" /* LOGOUT */
-                : "=&r" (tmp),
-                  "+r"  (v)
-                : "r"   (i)
-                : "memory", "r0", "r1");
-
-        return tmp;
-}
+#define ATOMIC_OP(op)                                                   \
+static inline void atomic_##op(int i, atomic_t *v)                      \
+{                                                                       \
+        int tmp;                                                        \
+                                                                        \
+        __asm__ __volatile__ (                                          \
+                "   .align 2              \n\t"                         \
+                "   mova    1f,   r0      \n\t" /* r0 = end point */    \
+                "   mov    r15,   r1      \n\t" /* r1 = saved sp */     \
+                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */ \
+                "   mov.l  @%1,   %0      \n\t" /* load old value */    \
+                "   " #op "     %2,   %0      \n\t" /* $op */           \
+                "   mov.l   %0,   @%1     \n\t" /* store new value */   \
+                "1: mov     r1,   r15     \n\t" /* LOGOUT */            \
+                : "=&r" (tmp),                                          \
+                  "+r"  (v)                                             \
+                : "r"   (i)                                             \
+                : "memory" , "r0", "r1");                               \
+}                                                                       \
+
+#define ATOMIC_OP_RETURN(op)                                            \
+static inline int atomic_##op##_return(int i, atomic_t *v)              \
+{                                                                       \
+        int tmp;                                                        \
+                                                                        \
+        __asm__ __volatile__ (                                          \
+                "   .align 2              \n\t"                         \
+                "   mova    1f,   r0      \n\t" /* r0 = end point */    \
+                "   mov    r15,   r1      \n\t" /* r1 = saved sp */     \
+                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */ \
+                "   mov.l  @%1,   %0      \n\t" /* load old value */    \
+                "   " #op "     %2,   %0      \n\t" /* $op */           \
+                "   mov.l   %0,   @%1     \n\t" /* store new value */   \
+                "1: mov     r1,   r15     \n\t" /* LOGOUT */            \
+                : "=&r" (tmp),                                          \
+                  "+r"  (v)                                             \
+                : "r"   (i)                                             \
+                : "memory" , "r0", "r1");                               \
+                                                                        \
+        return tmp;                                                     \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -8,49 +8,39 @@
  * forward to code at the end of this object's .text section, then
  * branch back to restart the operation.
  */
-static inline void atomic_add(int i, atomic_t *v)
-{
-        unsigned long flags;
-
-        raw_local_irq_save(flags);
-        v->counter += i;
-        raw_local_irq_restore(flags);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-        unsigned long flags;
-
-        raw_local_irq_save(flags);
-        v->counter -= i;
-        raw_local_irq_restore(flags);
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long temp, flags;
-
-        raw_local_irq_save(flags);
-        temp = v->counter;
-        temp += i;
-        v->counter = temp;
-        raw_local_irq_restore(flags);
-
-        return temp;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long temp, flags;
-
-        raw_local_irq_save(flags);
-        temp = v->counter;
-        temp -= i;
-        v->counter = temp;
-        raw_local_irq_restore(flags);
-
-        return temp;
-}
+#define ATOMIC_OP(op, c_op)                                             \
+static inline void atomic_##op(int i, atomic_t *v)                      \
+{                                                                       \
+        unsigned long flags;                                            \
+                                                                        \
+        raw_local_irq_save(flags);                                      \
+        v->counter c_op i;                                              \
+        raw_local_irq_restore(flags);                                   \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)                                      \
+static inline int atomic_##op##_return(int i, atomic_t *v)              \
+{                                                                       \
+        unsigned long temp, flags;                                      \
+                                                                        \
+        raw_local_irq_save(flags);                                      \
+        temp = v->counter;                                              \
+        temp c_op i;                                                    \
+        v->counter = temp;                                              \
+        raw_local_irq_restore(flags);                                   \
+                                                                        \
+        return temp;                                                    \
+}
+
+#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -1,39 +1,6 @@
 #ifndef __ASM_SH_ATOMIC_LLSC_H
 #define __ASM_SH_ATOMIC_LLSC_H
 
-/*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_add    \n"
-"       add     %1, %0                          \n"
-"       movco.l %0, @%2                         \n"
-"       bf      1b                              \n"
-        : "=&z" (tmp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_sub    \n"
-"       sub     %1, %0                          \n"
-"       movco.l %0, @%2                         \n"
-"       bf      1b                              \n"
-        : "=&z" (tmp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-}
-
 /*
  * SH-4A note:
  *
@@ -42,40 +9,54 @@ static inline void atomic_sub(int i, atomic_t *v)
  * encoding, so the retval is automatically set without having to
  * do any special work.
  */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long temp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_add_return     \n"
-"       add     %1, %0                                  \n"
-"       movco.l %0, @%2                                 \n"
-"       bf      1b                                      \n"
-"       synco                                           \n"
-        : "=&z" (temp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-
-        return temp;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long temp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_sub_return     \n"
-"       sub     %1, %0                                  \n"
-"       movco.l %0, @%2                                 \n"
-"       bf      1b                                      \n"
-"       synco                                           \n"
-        : "=&z" (temp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-
-        return temp;
-}
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+
+#define ATOMIC_OP(op)                                                   \
+static inline void atomic_##op(int i, atomic_t *v)                      \
+{                                                                       \
+        unsigned long tmp;                                              \
+                                                                        \
+        __asm__ __volatile__ (                                          \
+"1:     movli.l @%2, %0         ! atomic_" #op "\n"                     \
+"       " #op " %1, %0                          \n"                     \
+"       movco.l %0, @%2                         \n"                     \
+"       bf      1b                              \n"                     \
+        : "=&z" (tmp)                                                   \
+        : "r" (i), "r" (&v->counter)                                    \
+        : "t");                                                         \
+}
+
+#define ATOMIC_OP_RETURN(op)                                            \
+static inline int atomic_##op##_return(int i, atomic_t *v)              \
+{                                                                       \
+        unsigned long temp;                                             \
+                                                                        \
+        __asm__ __volatile__ (                                          \
+"1:     movli.l @%2, %0         ! atomic_" #op "_return \n"             \
+"       " #op " %1, %0                          \n"                     \
+"       movco.l %0, @%2                         \n"                     \
+"       bf      1b                              \n"                     \
+"       synco                                   \n"                     \
+        : "=&z" (temp)                                                  \
+        : "r" (i), "r" (&v->counter)                                    \
+        : "t");                                                         \
+                                                                        \
+        return temp;                                                    \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
         unsigned long tmp;
 