[SPARC]: Make bitops use same spinlocks as atomics.

Recent workqueue changes basically make this a formal requirement.
Also, move atomic32.o from lib-y to obj-y since it exports symbols
to modules.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 216da721b8
commit 8a8b836b91
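For context: sparc32 has little more than ldstub for synchronization, so
atomic_t is emulated in arch/sparc/lib/atomic32.c with a small array of
hashed spinlocks, and this patch routes the bitops through those same
locks.  A minimal sketch of the scheme, along the lines of
include/asm-sparc/atomic.h (the table size and hash shift below are
illustrative assumptions, not quoted from the tree):

	/* Sketch only: hash the target address onto one of a few spinlocks
	 * so that every atomic op and bitop touching a given word
	 * serializes on the same lock.  Constants are assumptions.
	 */
	#define ATOMIC_HASH_SIZE	4
	extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
	#define ATOMIC_HASH(a) \
		(&__atomic_hash[(((unsigned long)(a)) >> 8) & (ATOMIC_HASH_SIZE - 1)])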
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -83,9 +83,6 @@ extern int __divdi3(int, int);
 /* Private functions with odd calling conventions. */
 extern void ___atomic24_add(void);
 extern void ___atomic24_sub(void);
-extern void ___set_bit(void);
-extern void ___clear_bit(void);
-extern void ___change_bit(void);
 extern void ___rw_read_enter(void);
 extern void ___rw_read_try(void);
 extern void ___rw_read_exit(void);
@@ -125,11 +122,6 @@ EXPORT_SYMBOL(pfn_base);
 EXPORT_SYMBOL(___atomic24_add);
 EXPORT_SYMBOL(___atomic24_sub);
 
-/* Bit operations. */
-EXPORT_SYMBOL(___set_bit);
-EXPORT_SYMBOL(___clear_bit);
-EXPORT_SYMBOL(___change_bit);
-
 /* Per-CPU information table */
 EXPORT_PER_CPU_SYMBOL(__cpu_data);
 
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -78,7 +78,6 @@ unsigned long profile_pc(struct pt_regs *regs)
 	extern char __copy_user_begin[], __copy_user_end[];
 	extern char __atomic_begin[], __atomic_end[];
 	extern char __bzero_begin[], __bzero_end[];
-	extern char __bitops_begin[], __bitops_end[];
 
 	unsigned long pc = regs->pc;
 
@@ -88,9 +87,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 	    (pc >= (unsigned long) __atomic_begin &&
 	     pc < (unsigned long) __atomic_end) ||
 	    (pc >= (unsigned long) __bzero_begin &&
-	     pc < (unsigned long) __bzero_end) ||
-	    (pc >= (unsigned long) __bitops_begin &&
-	     pc < (unsigned long) __bitops_end))
+	     pc < (unsigned long) __bzero_end))
 		pc = regs->u_regs[UREG_RETPC];
 	return pc;
 }
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -7,7 +7,7 @@ EXTRA_AFLAGS := -ansi -DST_DIV0=0x02
 lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
          strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \
          strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
-         copy_user.o locks.o atomic.o atomic32.o bitops.o \
+         copy_user.o locks.o atomic.o \
          lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
 
-obj-y += iomap.o
+obj-y += iomap.o atomic32.o
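Why the lib-y to obj-y move matters: kbuild puts lib-y objects into lib.a,
and the linker only pulls in an archive member when something in vmlinux
references it.  An EXPORT_SYMBOL alone creates no such reference, so an
exported-only object in lib-y can be silently dropped, breaking modules
that need its symbols; obj-y objects are always linked.  Schematically
(helper.o is a hypothetical name for illustration):

	# lib-y members are linked on demand only; if vmlinux itself never
	# calls into helper.o, its exports vanish from the final image.
	lib-y += helper.o
	# obj-y members are linked unconditionally, keeping exports alive.
	obj-y += atomic32.o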
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -76,3 +76,42 @@ void atomic_set(atomic_t *v, int i)
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 }
 EXPORT_SYMBOL(atomic_set);
+
+unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
+{
+	unsigned long old, flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	old = *addr;
+	*addr = old | mask;
+	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+	return old & mask;
+}
+EXPORT_SYMBOL(___set_bit);
+
+unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
+{
+	unsigned long old, flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	old = *addr;
+	*addr = old & ~mask;
+	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+	return old & mask;
+}
+EXPORT_SYMBOL(___clear_bit);
+
+unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
+{
+	unsigned long old, flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+	old = *addr;
+	*addr = old ^ mask;
+	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+
+	return old & mask;
+}
+EXPORT_SYMBOL(___change_bit);
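Note the return convention: each helper returns the old word ANDed with the
caller's mask, nonzero iff any of the masked bits were previously set, so one
function backs both the test_and_*_bit and the plain *_bit variants (the
header just discards the return value for the latter).  A hypothetical
caller sketch, not part of the patch:

	unsigned long word = 0;

	___set_bit(&word, 1UL << 3);		/* returns 0: bit was clear */
	if (___set_bit(&word, 1UL << 3))	/* returns nonzero this time */
		/* bit was already set; lost the race */;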
--- a/arch/sparc/lib/bitops.S
+++ /dev/null
@@ -1,109 +0,0 @@
-/* bitops.S: Low level assembler bit operations.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <asm/ptrace.h>
-#include <asm/psr.h>
-
-	.text
-	.align	4
-
-	.globl	__bitops_begin
-__bitops_begin:
-
-	/* Take bits in %g2 and set them in word at %g1,
-	 * return whether bits were set in original value
-	 * in %g2.  %g4 holds value to restore into %o7
-	 * in delay slot of jmpl return, %g3 + %g5 + %g7 can be
-	 * used as temporaries and thus is considered clobbered
-	 * by all callers.
-	 */
-	.globl	___set_bit
-___set_bit:
-	rd	%psr, %g3
-	nop; nop; nop;
-	or	%g3, PSR_PIL, %g5
-	wr	%g5, 0x0, %psr
-	nop; nop; nop
-#ifdef CONFIG_SMP
-	set	bitops_spinlock, %g5
-2:	ldstub	[%g5], %g7		! Spin on the byte lock for SMP.
-	orcc	%g7, 0x0, %g0		! Did we get it?
-	bne	2b			! Nope...
-#endif
-	 ld	[%g1], %g7
-	or	%g7, %g2, %g5
-	and	%g7, %g2, %g2
-#ifdef CONFIG_SMP
-	st	%g5, [%g1]
-	set	bitops_spinlock, %g5
-	stb	%g0, [%g5]
-#else
-	st	%g5, [%g1]
-#endif
-	wr	%g3, 0x0, %psr
-	nop; nop; nop
-	jmpl	%o7, %g0
-	 mov	%g4, %o7
-
-	/* Same as above, but clears the bits from %g2 instead. */
-	.globl	___clear_bit
-___clear_bit:
-	rd	%psr, %g3
-	nop; nop; nop
-	or	%g3, PSR_PIL, %g5
-	wr	%g5, 0x0, %psr
-	nop; nop; nop
-#ifdef CONFIG_SMP
-	set	bitops_spinlock, %g5
-2:	ldstub	[%g5], %g7		! Spin on the byte lock for SMP.
-	orcc	%g7, 0x0, %g0		! Did we get it?
-	bne	2b			! Nope...
-#endif
-	 ld	[%g1], %g7
-	andn	%g7, %g2, %g5
-	and	%g7, %g2, %g2
-#ifdef CONFIG_SMP
-	st	%g5, [%g1]
-	set	bitops_spinlock, %g5
-	stb	%g0, [%g5]
-#else
-	st	%g5, [%g1]
-#endif
-	wr	%g3, 0x0, %psr
-	nop; nop; nop
-	jmpl	%o7, %g0
-	 mov	%g4, %o7
-
-	/* Same thing again, but this time toggles the bits from %g2. */
-	.globl	___change_bit
-___change_bit:
-	rd	%psr, %g3
-	nop; nop; nop
-	or	%g3, PSR_PIL, %g5
-	wr	%g5, 0x0, %psr
-	nop; nop; nop
-#ifdef CONFIG_SMP
-	set	bitops_spinlock, %g5
-2:	ldstub	[%g5], %g7		! Spin on the byte lock for SMP.
-	orcc	%g7, 0x0, %g0		! Did we get it?
-	bne	2b			! Nope...
-#endif
-	 ld	[%g1], %g7
-	xor	%g7, %g2, %g5
-	and	%g7, %g2, %g2
-#ifdef CONFIG_SMP
-	st	%g5, [%g1]
-	set	bitops_spinlock, %g5
-	stb	%g0, [%g5]
-#else
-	st	%g5, [%g1]
-#endif
-	wr	%g3, 0x0, %psr
-	nop; nop; nop
-	jmpl	%o7, %g0
-	 mov	%g4, %o7
-
-	.globl	__bitops_end
-__bitops_end:
--- a/include/asm-sparc/bitops.h
+++ b/include/asm-sparc/bitops.h
@@ -14,6 +14,10 @@
 
 #ifdef __KERNEL__
 
+extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
+extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
+extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
+
 /*
  * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
  * is in the highest of the four bytes and bit '31' is the high bit
@@ -22,134 +26,62 @@
  */
 static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___set_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
-
-	return mask != 0;
+	return ___set_bit(ADDR, mask) != 0;
 }
 
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___set_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
+	(void) ___set_bit(ADDR, mask);
 }
 
 static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___clear_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
-
-	return mask != 0;
+	return ___clear_bit(ADDR, mask) != 0;
 }
 
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___clear_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
+	(void) ___clear_bit(ADDR, mask);
 }
 
 static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___change_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
-
-	return mask != 0;
+	return ___change_bit(ADDR, mask) != 0;
 }
 
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	register unsigned long mask asm("g2");
-	register unsigned long *ADDR asm("g1");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g5");
-	register int tmp4 asm("g7");
+	unsigned long *ADDR, mask;
 
 	ADDR = ((unsigned long *) addr) + (nr >> 5);
 	mask = 1 << (nr & 31);
 
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___change_bit\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
-	: "0" (mask), "r" (ADDR)
-	: "memory", "cc");
+	(void) ___change_bit(ADDR, mask);
 }
 
 #include <asm-generic/bitops/non-atomic.h>
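The public interface is unchanged: callers keep using the generic kernel
bitop API, only the implementation behind it moved from hand-written asm
with fixed global registers to plain C under the hashed locks.  A
hypothetical caller, with illustrative names:

	static unsigned long pending_flags;	/* assumed example bitmap */

	if (!test_and_set_bit(0, &pending_flags))
		/* first setter wins: schedule the work */;
	clear_bit(0, &pending_flags);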