locking/rwsem: Provide down_write_killable()
Now that all the architectures implement the necessary glue code we can
introduce down_write_killable(). The only difference wrt. the regular
down_write() is that the slow path waits in TASK_KILLABLE state and the
interruption by the fatal signal is reported as -EINTR to the caller.

Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Zankel <chris@zankel.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Signed-off-by: Jason Low <jason.low2@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-alpha@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-ia64@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: linux-sh@vger.kernel.org
Cc: linux-xtensa@linux-xtensa.org
Cc: sparclinux@vger.kernel.org
Link: http://lkml.kernel.org/r/1460041951-22347-12-git-send-email-mhocko@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 664b4e24c6
commit 916633a403
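To make the intended calling convention concrete, here is a minimal sketch of a caller (the function name and semaphore argument are illustrative only, not part of this patch): down_write_killable() returns 0 once the write lock is held, and -EINTR if a fatal signal arrived while waiting, in which case the lock is not held and must not be released.

	/* illustrative caller, not part of this patch */
	static int do_work_locked(struct rw_semaphore *sem)
	{
		if (down_write_killable(sem))
			return -EINTR;	/* fatal signal: lock was not taken */

		/* ... modify the data protected by sem ... */

		up_write(sem);
		return 0;
	}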
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -102,9 +102,9 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 #define ____down_write(sem, slow_path)			\
 ({							\
 	long tmp;					\
-	struct rw_semaphore* ret = sem;			\
+	struct rw_semaphore* ret;			\
 	asm volatile("# beginning down_write\n\t"	\
-		     LOCK_PREFIX " xadd %1,(%2)\n\t"	\
+		     LOCK_PREFIX " xadd %1,(%3)\n\t"	\
 		     /* adds 0xffff0001, returns the old value */ \
 		     " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
 		     /* was the active mask 0 before? */\
@@ -112,7 +112,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 		     " call " slow_path "\n"		\
 		     "1:\n"				\
 		     "# ending down_write"		\
-		     : "+m" (sem->count), "=d" (tmp), "+a" (ret)	\
+		     : "+m" (sem->count), "=d" (tmp), "=a" (ret)	\
 		     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS)	\
 		     : "memory", "cc");			\
 	ret;						\
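For orientation only (this wrapper comes from the parent x86 patch, not from the hunk above, so treat its exact shape as an approximation): the killable variant passes ____down_write() a slow path that returns the semaphore pointer on success or an ERR_PTR() value when a fatal signal interrupted the wait, which is why the macro evaluates to a struct rw_semaphore pointer held in %rax.

	static inline int __down_write_killable(struct rw_semaphore *sem)
	{
		if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable")))
			return -EINTR;

		return 0;
	}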
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -444,6 +444,18 @@ do {								\
 	lock_acquired(&(_lock)->dep_map, _RET_IP_);			\
 } while (0)
 
+#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
+({									\
+	int ____err = 0;						\
+	if (!try(_lock)) {						\
+		lock_contended(&(_lock)->dep_map, _RET_IP_);		\
+		____err = lock(_lock);					\
+	}								\
+	if (!____err)							\
+		lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
+	____err;							\
+})
+
 #else /* CONFIG_LOCK_STAT */
 
 #define lock_contended(lockdep_map, ip) do {} while (0)
@@ -452,6 +464,9 @@ do {								\
 #define LOCK_CONTENDED(_lock, try, lock) \
 	lock(_lock)
 
+#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
+	lock(_lock)
+
 #endif /* CONFIG_LOCK_STAT */
 
 #ifdef CONFIG_LOCKDEP
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -118,6 +118,7 @@ extern int down_read_trylock(struct rw_semaphore *sem);
  * lock for writing
  */
 extern void down_write(struct rw_semaphore *sem);
+extern int __must_check down_write_killable(struct rw_semaphore *sem);
 
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -54,6 +54,25 @@ void __sched down_write(struct rw_semaphore *sem)
 
 EXPORT_SYMBOL(down_write);
 
+/*
+ * lock for writing
+ */
+int __sched down_write_killable(struct rw_semaphore *sem)
+{
+	might_sleep();
+	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
+
+	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
+		rwsem_release(&sem->dep_map, 1, _RET_IP_);
+		return -EINTR;
+	}
+
+	rwsem_set_owner(sem);
+	return 0;
+}
+
+EXPORT_SYMBOL(down_write_killable);
+
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
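Since LOCK_CONTENDED_RETURN() is new in this patch, it may help to see what the invocation in down_write_killable() boils down to when CONFIG_LOCK_STAT is enabled. The sketch below simply substitutes the macro arguments from the hunk above into the macro body; in the real code this is a statement expression, written out here as plain statements for readability.

	int ____err = 0;

	/* fast path: try to take the write lock without sleeping */
	if (!__down_write_trylock(sem)) {
		lock_contended(&sem->dep_map, _RET_IP_);
		/* slow path: waits in TASK_KILLABLE, returns -EINTR on a fatal signal */
		____err = __down_write_killable(sem);
	}
	if (!____err)
		lock_acquired(&sem->dep_map, _RET_IP_);

	/* ____err is the value that down_write_killable() tests */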