locking/rwsem: Always try to wake waiters in out_nolock path
For writers, the out_nolock path will always attempt to wake up waiters. This may not be really necessary if the waiter to be removed is not the first one. For readers, no attempt to wake up a waiter is made. However, if the HANDOFF bit is set and the reader to be removed is the first waiter, the waiter behind it will inherit the HANDOFF bit and, for a write-lock waiter, waking it up will allow it to spin on the lock to acquire it faster. So it can be beneficial to do a wakeup in this case. Add a new rwsem_del_wake_waiter() helper function to do that consistently for both the reader and writer out_nolock paths. Signed-off-by: Waiman Long <longman@redhat.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lkml.kernel.org/r/20220322152059.2182333-4-longman@redhat.com
This commit is contained in:
parent
54c1ee4d61
commit
1ee326196c
|
@ -375,16 +375,19 @@ rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
|
|||
*
|
||||
* Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
|
||||
* this function. Modify with care.
|
||||
*
|
||||
* Return: true if wait_list isn't empty and false otherwise
|
||||
*/
|
||||
static inline void
|
||||
static inline bool
|
||||
rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
|
||||
{
|
||||
lockdep_assert_held(&sem->wait_lock);
|
||||
list_del(&waiter->list);
|
||||
if (likely(!list_empty(&sem->wait_list)))
|
||||
return;
|
||||
return true;
|
||||
|
||||
atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -558,6 +561,33 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove a waiter and try to wake up other waiters in the wait queue
|
||||
* This function is called from the out_nolock path of both the reader and
|
||||
* writer slowpaths with wait_lock held. It releases the wait_lock and
|
||||
* optionally wake up waiters before it returns.
|
||||
*/
|
||||
static inline void
|
||||
rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
|
||||
struct wake_q_head *wake_q)
|
||||
__releases(&sem->wait_lock)
|
||||
{
|
||||
bool first = rwsem_first_waiter(sem) == waiter;
|
||||
|
||||
wake_q_init(wake_q);
|
||||
|
||||
/*
|
||||
* If the wait_list isn't empty and the waiter to be deleted is
|
||||
* the first waiter, we wake up the remaining waiters as they may
|
||||
* be eligible to acquire or spin on the lock.
|
||||
*/
|
||||
if (rwsem_del_waiter(sem, waiter) && first)
|
||||
rwsem_mark_wake(sem, RWSEM_WAKE_ANY, wake_q);
|
||||
raw_spin_unlock_irq(&sem->wait_lock);
|
||||
if (!wake_q_empty(wake_q))
|
||||
wake_up_q(wake_q);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function must be called with the sem->wait_lock held to prevent
|
||||
* race conditions between checking the rwsem wait list and setting the
|
||||
|
@ -1050,8 +1080,7 @@ queue:
|
|||
return sem;
|
||||
|
||||
out_nolock:
|
||||
rwsem_del_waiter(sem, &waiter);
|
||||
raw_spin_unlock_irq(&sem->wait_lock);
|
||||
rwsem_del_wake_waiter(sem, &waiter, &wake_q);
|
||||
__set_current_state(TASK_RUNNING);
|
||||
lockevent_inc(rwsem_rlock_fail);
|
||||
return ERR_PTR(-EINTR);
|
||||
|
@ -1095,7 +1124,6 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
|
|||
*/
|
||||
raw_spin_unlock_irq(&sem->wait_lock);
|
||||
wake_up_q(&wake_q);
|
||||
wake_q_init(&wake_q); /* Used again, reinit */
|
||||
raw_spin_lock_irq(&sem->wait_lock);
|
||||
}
|
||||
} else {
|
||||
|
@ -1148,11 +1176,7 @@ trylock_again:
|
|||
out_nolock:
|
||||
__set_current_state(TASK_RUNNING);
|
||||
raw_spin_lock_irq(&sem->wait_lock);
|
||||
rwsem_del_waiter(sem, &waiter);
|
||||
if (!list_empty(&sem->wait_list))
|
||||
rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
|
||||
raw_spin_unlock_irq(&sem->wait_lock);
|
||||
wake_up_q(&wake_q);
|
||||
rwsem_del_wake_waiter(sem, &waiter, &wake_q);
|
||||
lockevent_inc(rwsem_wlock_fail);
|
||||
return ERR_PTR(-EINTR);
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue