rtmutex: Wake up the waiters lockless while dropping the read lock.
The rw_semaphore and rwlock_t implementations both wake the waiter while holding the rt_mutex_base::wait_lock acquired. This can be optimized by waking the waiter lockless outside of the locked section to avoid needless contention on the rt_mutex_base::wait_lock lock.

Extend rt_mutex_wake_q_add() to also accept task and state and use it in __rwbase_read_unlock().

Suggested-by: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210928150006.597310-3-bigeasy@linutronix.de
This commit is contained in:
parent
8fe46535e1
commit
9321f8152d
|
@ -446,17 +446,24 @@ static __always_inline void rt_mutex_adjust_prio(struct task_struct *p)
|
|||
}
|
||||
|
||||
/* RT mutex specific wake_q wrappers */
|
||||
static __always_inline void rt_mutex_wake_q_add_task(struct rt_wake_q_head *wqh,
|
||||
struct task_struct *task,
|
||||
unsigned int wake_state)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_PREEMPT_RT) && wake_state == TASK_RTLOCK_WAIT) {
|
||||
if (IS_ENABLED(CONFIG_PROVE_LOCKING))
|
||||
WARN_ON_ONCE(wqh->rtlock_task);
|
||||
get_task_struct(task);
|
||||
wqh->rtlock_task = task;
|
||||
} else {
|
||||
wake_q_add(&wqh->head, task);
|
||||
}
|
||||
}
|
||||
|
||||
static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
|
||||
struct rt_mutex_waiter *w)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state == TASK_RTLOCK_WAIT) {
|
||||
if (IS_ENABLED(CONFIG_PROVE_LOCKING))
|
||||
WARN_ON_ONCE(wqh->rtlock_task);
|
||||
get_task_struct(w->task);
|
||||
wqh->rtlock_task = w->task;
|
||||
} else {
|
||||
wake_q_add(&wqh->head, w->task);
|
||||
}
|
||||
rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state);
|
||||
}
|
||||
|
||||
static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
|
||||
|
|
|
@ -141,6 +141,7 @@ static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
|
|||
{
|
||||
struct rt_mutex_base *rtm = &rwb->rtmutex;
|
||||
struct task_struct *owner;
|
||||
DEFINE_RT_WAKE_Q(wqh);
|
||||
|
||||
raw_spin_lock_irq(&rtm->wait_lock);
|
||||
/*
|
||||
|
@ -151,9 +152,12 @@ static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
|
|||
*/
|
||||
owner = rt_mutex_owner(rtm);
|
||||
if (owner)
|
||||
wake_up_state(owner, state);
|
||||
rt_mutex_wake_q_add_task(&wqh, owner, state);
|
||||
|
||||
/* Pairs with the preempt_enable in rt_mutex_wake_up_q() */
|
||||
preempt_disable();
|
||||
raw_spin_unlock_irq(&rtm->wait_lock);
|
||||
rt_mutex_wake_up_q(&wqh);
|
||||
}
|
||||
|
||||
static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
|
||||
|
|
Loading…
Reference in New Issue