rtmutex: Clean up
Previous patches changed the meaning of the return value of rt_mutex_slowunlock(); update comments and code to reflect this. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: juri.lelli@arm.com Cc: bigeasy@linutronix.de Cc: xlpang@redhat.com Cc: rostedt@goodmis.org Cc: mathieu.desnoyers@efficios.com Cc: jdesfossez@efficios.com Cc: bristot@redhat.com Link: http://lkml.kernel.org/r/20170323150216.255058238@infradead.org Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
85e2d4f992
commit
aa2bfe5536
|
@@ -1394,7 +1394,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
|
||||||
{
|
{
|
||||||
u32 uninitialized_var(curval), newval;
|
u32 uninitialized_var(curval), newval;
|
||||||
struct task_struct *new_owner;
|
struct task_struct *new_owner;
|
||||||
bool deboost = false;
|
bool postunlock = false;
|
||||||
DEFINE_WAKE_Q(wake_q);
|
DEFINE_WAKE_Q(wake_q);
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
|
@@ -1455,12 +1455,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
|
||||||
/*
|
/*
|
||||||
* We've updated the uservalue, this unlock cannot fail.
|
* We've updated the uservalue, this unlock cannot fail.
|
||||||
*/
|
*/
|
||||||
deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
|
postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
|
||||||
|
|
||||||
out_unlock:
|
out_unlock:
|
||||||
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
|
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
|
||||||
|
|
||||||
rt_mutex_postunlock(&wake_q, deboost);
|
if (postunlock)
|
||||||
|
rt_mutex_postunlock(&wake_q);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -1330,7 +1330,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Slow path to release a rt-mutex.
|
* Slow path to release a rt-mutex.
|
||||||
* Return whether the current task needs to undo a potential priority boosting.
|
*
|
||||||
|
* Return whether the current task needs to call rt_mutex_postunlock().
|
||||||
*/
|
*/
|
||||||
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
|
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
|
||||||
struct wake_q_head *wake_q)
|
struct wake_q_head *wake_q)
|
||||||
|
@@ -1401,8 +1402,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
|
||||||
|
|
||||||
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
|
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
|
||||||
|
|
||||||
/* check PI boosting */
|
return true; /* call rt_mutex_postunlock() */
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -1449,15 +1449,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Undo pi boosting (if necessary) and wake top waiter.
|
* Performs the wakeup of the top-waiter and re-enables preemption.
|
||||||
*/
|
*/
|
||||||
void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost)
|
void rt_mutex_postunlock(struct wake_q_head *wake_q)
|
||||||
{
|
{
|
||||||
wake_up_q(wake_q);
|
wake_up_q(wake_q);
|
||||||
|
|
||||||
/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
|
/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
|
||||||
if (deboost)
|
preempt_enable();
|
||||||
preempt_enable();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
|
@@ -1466,14 +1465,12 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
|
||||||
struct wake_q_head *wqh))
|
struct wake_q_head *wqh))
|
||||||
{
|
{
|
||||||
DEFINE_WAKE_Q(wake_q);
|
DEFINE_WAKE_Q(wake_q);
|
||||||
bool deboost;
|
|
||||||
|
|
||||||
if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
|
if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
deboost = slowfn(lock, &wake_q);
|
if (slowfn(lock, &wake_q))
|
||||||
|
rt_mutex_postunlock(&wake_q);
|
||||||
rt_mutex_postunlock(&wake_q, deboost);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@@ -1593,19 +1590,20 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
|
||||||
*/
|
*/
|
||||||
preempt_disable();
|
preempt_disable();
|
||||||
|
|
||||||
return true; /* deboost and wakeups */
|
return true; /* call postunlock() */
|
||||||
}
|
}
|
||||||
|
|
||||||
void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
|
void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
|
||||||
{
|
{
|
||||||
DEFINE_WAKE_Q(wake_q);
|
DEFINE_WAKE_Q(wake_q);
|
||||||
bool deboost;
|
bool postunlock;
|
||||||
|
|
||||||
raw_spin_lock_irq(&lock->wait_lock);
|
raw_spin_lock_irq(&lock->wait_lock);
|
||||||
deboost = __rt_mutex_futex_unlock(lock, &wake_q);
|
postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
|
||||||
raw_spin_unlock_irq(&lock->wait_lock);
|
raw_spin_unlock_irq(&lock->wait_lock);
|
||||||
|
|
||||||
rt_mutex_postunlock(&wake_q, deboost);
|
if (postunlock)
|
||||||
|
rt_mutex_postunlock(&wake_q);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@@ -122,7 +122,7 @@ extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
|
||||||
extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
|
extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
|
||||||
struct wake_q_head *wqh);
|
struct wake_q_head *wqh);
|
||||||
|
|
||||||
extern void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost);
|
extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
|
||||||
|
|
||||||
#ifdef CONFIG_DEBUG_RT_MUTEXES
|
#ifdef CONFIG_DEBUG_RT_MUTEXES
|
||||||
# include "rtmutex-debug.h"
|
# include "rtmutex-debug.h"
|
||||||
|
|
Loading…
Reference in New Issue