locking/rwsem, sh: Drop superfluous arch specific implementation
Since "locking, rwsem: drop explicit memory barriers" the arch specific code is basically same as the the generic one so we can drop the superfluous code. Suggested-by: Davidlohr Bueso <dave@stgolabs.net> Signed-off-by: Michal Hocko <mhocko@suse.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Chris Zankel <chris@zankel.net> Cc: David S. Miller <davem@davemloft.net> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Signed-off-by: Davidlohr Bueso <dbueso@suse.de> Cc: Signed-off-by: Jason Low <jason.low2@hp.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linux-sh@vger.kernel.org Cc: linux-xtensa@linux-xtensa.org Cc: sparclinux@vger.kernel.org Link: http://lkml.kernel.org/r/1460041951-22347-5-git-send-email-mhocko@kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3aa2591dc2
commit e4a2b01ed3
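The commit message's claim is easy to verify: by this point the generic fast paths in include/asm-generic/rwsem.h mirrored the sh code removed below almost line for line. A simplified sketch of two of them for comparison (a sketch, not a verbatim copy; the in-tree version works on atomic_long_t and the exact acquire/release variants may differ):

/*
 * Sketch of the generic __down_read()/__up_read() fast paths from
 * include/asm-generic/rwsem.h around this series, for comparison
 * with the sh implementation deleted below.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return_acquire(
			(atomic_long_t *)&sem->count) <= 0))
		rwsem_down_read_failed(sem);
}

static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count);
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}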
arch/sh/include/asm/Kbuild
@@ -26,6 +26,7 @@ generic-y += percpu.h
 generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += rwsem.h
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
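With the arch header gone, the generic-y entry above makes Kbuild emit a one-line wrapper header at build time, so <asm/rwsem.h> still resolves for sh. The generated file looks roughly like this (path and comment shown for illustration):

/* arch/sh/include/generated/asm/rwsem.h -- generated by Kbuild */
#include <asm-generic/rwsem.h>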
arch/sh/include/asm/rwsem.h
@@ -1,117 +0,0 @@
-/*
- * include/asm-sh/rwsem.h: R/W semaphores for SH using the stuff
- * in lib/rwsem.c.
- */
-
-#ifndef _ASM_SH_RWSEM_H
-#define _ASM_SH_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	if (atomic_inc_return((atomic_t *)(&sem->count)) <= 0)
-		rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	while ((tmp = sem->count) >= 0) {
-		if (tmp == cmpxchg(&sem->count, tmp,
-				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
-			return 1;
-		}
-	}
-	return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-				(atomic_t *)(&sem->count));
-	if (tmp != RWSEM_ACTIVE_WRITE_BIAS)
-		rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-		      RWSEM_ACTIVE_WRITE_BIAS);
-	return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = atomic_dec_return((atomic_t *)(&sem->count));
-	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
-		rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-			      (atomic_t *)(&sem->count)) < 0)
-		rwsem_wake(sem);
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
-{
-	atomic_add(delta, (atomic_t *)(&sem->count));
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
-	if (tmp < 0)
-		rwsem_downgrade_wake(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-	return atomic_add_return(delta, (atomic_t *)(&sem->count));
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_SH_RWSEM_H */
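For readers unfamiliar with the bias scheme the removed header (and its generic counterpart) uses: the low 16 bits of sem->count hold the number of active lockers, and the count goes negative while a writer holds the lock or waiters are queued, which is what the <= 0 check in __down_read() keys on. A small standalone sketch of the arithmetic (a hypothetical userspace demo, not kernel code):

#include <stdio.h>

/* Same encoding as the removed header: low 16 bits count active
 * lockers; adding the write bias makes the count negative.
 */
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	int count = RWSEM_UNLOCKED_VALUE;

	count += RWSEM_ACTIVE_READ_BIAS;	/* a reader takes the lock */
	printf("one reader: count=%#x, active=%d\n",
	       count, count & RWSEM_ACTIVE_MASK);

	count -= RWSEM_ACTIVE_READ_BIAS;	/* reader releases */
	count += RWSEM_ACTIVE_WRITE_BIAS;	/* a writer takes the lock */

	/* A reader's atomic_inc_return() would now see a value <= 0,
	 * so it falls back to the rwsem_down_read_failed() slow path.
	 */
	printf("writer held: reader would see %d (<= 0, must block)\n",
	       count + RWSEM_ACTIVE_READ_BIAS);
	return 0;
}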