locking/atomic, arch/blackfin: Implement atomic_fetch_{add,sub,and,or,xor}()
Implement FETCH-OP atomic primitives. These are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops -- because the value after modification cannot be used to
reconstruct the state prior to it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Miao <realmz6@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: adi-buildroot-devel@lists.sourceforge.net
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
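For illustration, a FETCH-OP lets a caller observe a bit's prior state,
which the corresponding OP-RETURN result cannot provide once the bits
have been OR-ed in. A hypothetical helper built on the new
atomic_fetch_or() (an editorial sketch, not part of this patch):

	/* Hypothetical example -- not code from this commit. */
	static int flag_test_and_set(atomic_t *v, int bit)
	{
		int old = atomic_fetch_or(1 << bit, v);	/* value before the OR */

		return (old >> bit) & 1;		/* was the bit already set? */
	}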
parent 1a6eafacd4
commit e87fc0ec07

arch/blackfin/include/asm/atomic.h
@@ -17,6 +17,7 @@
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
 asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_xadd_asm(volatile int *ptr, int value);
 
 asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
@@ -28,10 +29,17 @@ asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 #define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
 #define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
+#define atomic_fetch_add(i, v) __raw_atomic_xadd_asm(&(v)->counter, i)
+#define atomic_fetch_sub(i, v) __raw_atomic_xadd_asm(&(v)->counter, -(i))
+
 #define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i)
 #define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
 #define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
 
+#define atomic_fetch_or(i, v) __raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_fetch_and(i, v) __raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_fetch_xor(i, v) __raw_atomic_xor_asm(&(v)->counter, i)
+
 #endif
 
 #include <asm-generic/atomic.h>

arch/blackfin/kernel/bfin_ksyms.c
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(insl_16);
 
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_xadd_asm);
 EXPORT_SYMBOL(__raw_atomic_and_asm);
 EXPORT_SYMBOL(__raw_atomic_or_asm);
 EXPORT_SYMBOL(__raw_atomic_xor_asm);

arch/blackfin/mach-bf561/atomic.S
@@ -605,6 +605,28 @@ ENTRY(___raw_atomic_add_asm)
 	rts;
 ENDPROC(___raw_atomic_add_asm)
 
+/*
+ * r0 = ptr
+ * r1 = value
+ *
+ * ADD a signed value to a 32bit word and return the old value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_atomic_xadd_asm)
+	p1 = r0;
+	r3 = r1;
+	[--sp] = rets;
+	call _get_core_lock;
+	r3 = [p1];
+	r2 = r3 + r2;
+	[p1] = r2;
+	r1 = p1;
+	call _put_core_lock;
+	r0 = r3;
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_atomic_xadd_asm)
+
 /*
  * r0 = ptr
  * r1 = mask
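In C terms, the new ___raw_atomic_xadd_asm routine is meant to implement
the following critical section (an editorial sketch; get_core_lock() and
put_core_lock() are hypothetical C spellings of the _get_core_lock and
_put_core_lock helpers that serialize the two BF561 cores):

	/* Sketch of the intended FETCH-ADD semantics -- not code from this commit. */
	static inline int raw_atomic_xadd_sketch(volatile int *ptr, int value)
	{
		unsigned long flags;
		int old;

		flags = get_core_lock(ptr);	/* hypothetical: take the cross-core lock */
		old = *ptr;			/* fetch the value before modification */
		*ptr = old + value;		/* apply the ADD */
		put_core_lock(flags, ptr);	/* hypothetical: release the lock */
		return old;
	}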
@@ -618,10 +640,9 @@ ENTRY(___raw_atomic_and_asm)
 	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
-	r2 = [p1];
-	r3 = r2 & r3;
-	[p1] = r3;
-	r3 = r2;
+	r3 = [p1];
+	r2 = r2 & r3;
+	[p1] = r2;
 	r1 = p1;
 	call _put_core_lock;
 	r0 = r3;
@@ -642,10 +663,9 @@ ENTRY(___raw_atomic_or_asm)
 	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
-	r2 = [p1];
-	r3 = r2 | r3;
-	[p1] = r3;
-	r3 = r2;
+	r3 = [p1];
+	r2 = r2 | r3;
+	[p1] = r2;
 	r1 = p1;
 	call _put_core_lock;
 	r0 = r3;
@@ -666,10 +686,9 @@ ENTRY(___raw_atomic_xor_asm)
 	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
-	r2 = [p1];
-	r3 = r2 ^ r3;
-	[p1] = r3;
-	r3 = r2;
+	r3 = [p1];
+	r2 = r2 ^ r3;
+	[p1] = r2;
 	r1 = p1;
 	call _put_core_lock;
 	r0 = r3;
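Note that the three bitwise hunks above do not change what the routines
return: ___raw_atomic_{and,or,xor}_asm already handed back the old value
(the deleted "r3 = r2;" move existed only to preserve it), which is what
lets the new atomic_fetch_{and,or,xor}() macros map onto them directly;
each hunk merely drops one register move. The operation they provide, as
an editorial C sketch (not code from this commit):

	/* Sketch of the FETCH-AND semantics; locking omitted. */
	static inline int raw_atomic_fetch_and_sketch(volatile int *ptr, int mask)
	{
		int old = *ptr;		/* fetch the value before modification */

		*ptr = old & mask;	/* store the masked value */
		return old;
	}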