[PATCH] clean up and remove some extra spinlocks from rtmutex
Oleg brought up some interesting points about grabbing the pi_lock for some protections. In that discussion, I realized that there are some places where the pi_lock is being grabbed when it really isn't necessary. This patch also does a little bit of clean up.

This patch basically does three things:

1) Renames the "boost" variable to "chain_walk", since it is also used in the debugging case when nothing is going to be boosted. The new name better describes what happens when the test succeeds.

2) Moves get_task_struct() to just before the unlocking of the wait_lock. This removes duplicate code and makes it a little easier to read. The owner won't go away while either the pi_lock or the wait_lock is held.

3) Removes the pi_lock taking and the owner-blocked check completely from the debugging case. Grabbing the lock, doing the check, and then releasing the lock is full of races: after the lock is released the owner can block anyway, and we would have missed it. It is just as good to go ahead and call the chain-walk function, rt_mutex_adjust_prio_chain(), and for the debug case we really do want to do the chain walk to test for deadlocks anyway.

[oleg@tv-sign.ru: more of the same]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Esben Nielsen <nielsen.esben@googlemail.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
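To make the resulting locking pattern concrete, here is a minimal userspace sketch of the flow the patch settles on: decide whether a chain walk is needed while the inner lock is held, take a reference on the owner while the outer lock still pins it, and only then drop the outer lock and do the walk. This is NOT kernel code: pthread mutexes stand in for wait_lock/pi_lock, a plain atomic counter stands in for get_task_struct()/put_task_struct(), and every name (fake_lock, block_on, chain_walk_owner, etc.) is invented for illustration.

/*
 * Userspace analogue of the pattern in task_blocks_on_rt_mutex() after
 * this patch. Compile with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct owner {
        atomic_int refcount;    /* pins the struct, like the task usage count */
        int blocked;            /* stands in for owner->pi_blocked_on */
};

struct fake_lock {
        pthread_mutex_t wait_lock;      /* outer lock, pins the owner pointer */
        pthread_mutex_t pi_lock;        /* inner lock, protects owner->blocked */
        struct owner *owner;
};

static void owner_get(struct owner *o)
{
        atomic_fetch_add(&o->refcount, 1);
}

static void owner_put(struct owner *o)
{
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
                free(o);
}

/* Stands in for the chain walk: runs with no locks held. */
static void chain_walk_owner(struct owner *o)
{
        printf("walking chain for owner %p (blocked=%d)\n", (void *)o, o->blocked);
        owner_put(o);   /* the reference taken by the caller is dropped here */
}

/*
 * Called with lk->wait_lock held. Decide under pi_lock whether a walk is
 * needed; in the debug case just walk unconditionally instead of
 * re-checking under pi_lock. Take the owner reference while wait_lock
 * still pins the owner, then drop wait_lock and walk lock-free.
 */
static void block_on(struct fake_lock *lk, int debug_deadlock_check)
{
        struct owner *o = lk->owner;
        int chain_walk = 0;

        pthread_mutex_lock(&lk->pi_lock);
        if (o->blocked)
                chain_walk = 1;
        pthread_mutex_unlock(&lk->pi_lock);

        if (debug_deadlock_check)
                chain_walk = 1;

        if (!chain_walk)
                return;         /* wait_lock is still held by the caller */

        /* The owner can't go away while wait_lock is held, so this is safe. */
        owner_get(o);

        pthread_mutex_unlock(&lk->wait_lock);
        chain_walk_owner(o);                    /* no locks held here */
        pthread_mutex_lock(&lk->wait_lock);     /* re-acquire, as the real code does */
}

int main(void)
{
        struct fake_lock lk;
        struct owner *o = calloc(1, sizeof(*o));

        pthread_mutex_init(&lk.wait_lock, NULL);
        pthread_mutex_init(&lk.pi_lock, NULL);
        atomic_init(&o->refcount, 1);   /* main()'s own reference */
        o->blocked = 1;
        lk.owner = o;

        pthread_mutex_lock(&lk.wait_lock);
        block_on(&lk, 0);               /* owner is blocked, so a walk happens */
        pthread_mutex_unlock(&lk.wait_lock);

        owner_put(o);                   /* drop main()'s reference; frees the owner */
        return 0;
}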
commit db630637b2
parent c24c95a085
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -251,6 +251,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 
 	/* Grab the next task */
 	task = rt_mutex_owner(lock);
+	get_task_struct(task);
 	spin_lock_irqsave(&task->pi_lock, flags);
 
 	if (waiter == rt_mutex_top_waiter(lock)) {
@@ -269,7 +270,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		__rt_mutex_adjust_prio(task);
 	}
 
-	get_task_struct(task);
 	spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	top_waiter = rt_mutex_top_waiter(lock);
@@ -409,7 +409,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	struct task_struct *owner = rt_mutex_owner(lock);
 	struct rt_mutex_waiter *top_waiter = waiter;
 	unsigned long flags;
-	int boost = 0, res;
+	int chain_walk = 0, res;
 
 	spin_lock_irqsave(&current->pi_lock, flags);
 	__rt_mutex_adjust_prio(current);
@@ -433,25 +433,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
 
 		__rt_mutex_adjust_prio(owner);
-		if (owner->pi_blocked_on) {
-			boost = 1;
-			/* gets dropped in rt_mutex_adjust_prio_chain()! */
-			get_task_struct(owner);
-		}
+		if (owner->pi_blocked_on)
+			chain_walk = 1;
 		spin_unlock_irqrestore(&owner->pi_lock, flags);
 	}
-	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
-		spin_lock_irqsave(&owner->pi_lock, flags);
-		if (owner->pi_blocked_on) {
-			boost = 1;
-			/* gets dropped in rt_mutex_adjust_prio_chain()! */
-			get_task_struct(owner);
-		}
-		spin_unlock_irqrestore(&owner->pi_lock, flags);
-	}
-	if (!boost)
+	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
+		chain_walk = 1;
+
+	if (!chain_walk)
 		return 0;
 
+	/*
+	 * The owner can't disappear while holding a lock,
+	 * so the owner struct is protected by wait_lock.
+	 * Gets dropped in rt_mutex_adjust_prio_chain()!
+	 */
+	get_task_struct(owner);
+
 	spin_unlock(&lock->wait_lock);
 
 	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
@@ -532,7 +530,7 @@ static void remove_waiter(struct rt_mutex *lock,
 	int first = (waiter == rt_mutex_top_waiter(lock));
 	struct task_struct *owner = rt_mutex_owner(lock);
 	unsigned long flags;
-	int boost = 0;
+	int chain_walk = 0;
 
 	spin_lock_irqsave(&current->pi_lock, flags);
 	plist_del(&waiter->list_entry, &lock->wait_list);
@@ -554,19 +552,20 @@ static void remove_waiter(struct rt_mutex *lock,
 		}
 		__rt_mutex_adjust_prio(owner);
 
-		if (owner->pi_blocked_on) {
-			boost = 1;
-			/* gets dropped in rt_mutex_adjust_prio_chain()! */
-			get_task_struct(owner);
-		}
+		if (owner->pi_blocked_on)
+			chain_walk = 1;
+
 		spin_unlock_irqrestore(&owner->pi_lock, flags);
 	}
 
 	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
 
-	if (!boost)
+	if (!chain_walk)
 		return;
 
+	/* gets dropped in rt_mutex_adjust_prio_chain()! */
+	get_task_struct(owner);
+
 	spin_unlock(&lock->wait_lock);
 
 	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
@@ -592,10 +591,10 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 		spin_unlock_irqrestore(&task->pi_lock, flags);
 		return;
 	}
 
+	/* gets dropped in rt_mutex_adjust_prio_chain()! */
+	get_task_struct(task);
 	spin_unlock_irqrestore(&task->pi_lock, flags);
 
-	/* gets dropped in rt_mutex_adjust_prio_chain()! */
-	get_task_struct(task);
 	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
 }