[PATCH] Cpuset: fix ABBA deadlock with cpu hotplug lock
Fix ABBA deadlock between lock_cpu_hotplug() and the cpuset callback_mutex lock. It only happens on cpu_exclusive cpusets, due to the dynamic sched domain code trying to take the cpu hotplug lock inside the cpuset callback_mutex lock. This bug has apparently been here for several months, but didn't get hit until the right customer load on a large system. This fix appears right from inspection, but it will take a few more days running it on that customer's workload to be confident we nailed it. We don't have any other reproducible test case. The lock_cpu_hotplug() call tends to cover large runs of code. The other places that hold both that lock and the cpuset callback_mutex lock always nest the cpuset lock inside the hotplug lock. This place tries to do the reverse, risking an ABBA deadlock. This is in the cpuset_rmdir() code, where we: * take the callback_mutex lock * mark the cpuset CS_REMOVED * call update_cpu_domains for cpu_exclusive cpusets * in that call, take the cpu_hotplug lock if the cpuset is marked for removal. Thanks to Jack Steiner for identifying this deadlock. The fix is to tear down the dynamic sched domain before we grab the cpuset callback_mutex lock. This way, the two locks are serialized, with the hotplug lock taken and released before trying for the cpuset lock. I suspect that this bug was introduced when I changed the cpuset locking from one lock to two. The dynamic sched domain dependency on cpu_exclusive cpusets and its hotplug hooks were added to this code earlier, when cpusets had only a single lock. It may well have been fine then. Signed-off-by: Paul Jackson <pj@sgi.com> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
aa95387774
commit
abb5a5cc6b
|
@ -762,6 +762,8 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
|
||||||
*
|
*
|
||||||
* Call with manage_mutex held. May nest a call to the
|
* Call with manage_mutex held. May nest a call to the
|
||||||
* lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
|
* lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
|
||||||
|
* Must not be called holding callback_mutex, because we must
|
||||||
|
* not call lock_cpu_hotplug() while holding callback_mutex.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static void update_cpu_domains(struct cpuset *cur)
|
static void update_cpu_domains(struct cpuset *cur)
|
||||||
|
@ -781,7 +783,7 @@ static void update_cpu_domains(struct cpuset *cur)
|
||||||
if (is_cpu_exclusive(c))
|
if (is_cpu_exclusive(c))
|
||||||
cpus_andnot(pspan, pspan, c->cpus_allowed);
|
cpus_andnot(pspan, pspan, c->cpus_allowed);
|
||||||
}
|
}
|
||||||
if (is_removed(cur) || !is_cpu_exclusive(cur)) {
|
if (!is_cpu_exclusive(cur)) {
|
||||||
cpus_or(pspan, pspan, cur->cpus_allowed);
|
cpus_or(pspan, pspan, cur->cpus_allowed);
|
||||||
if (cpus_equal(pspan, cur->cpus_allowed))
|
if (cpus_equal(pspan, cur->cpus_allowed))
|
||||||
return;
|
return;
|
||||||
|
@ -1917,6 +1919,17 @@ static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
|
||||||
return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
|
return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Locking note on the strange update_flag() call below:
|
||||||
|
*
|
||||||
|
* If the cpuset being removed is marked cpu_exclusive, then simulate
|
||||||
|
* turning cpu_exclusive off, which will call update_cpu_domains().
|
||||||
|
* The lock_cpu_hotplug() call in update_cpu_domains() must not be
|
||||||
|
* made while holding callback_mutex. Elsewhere the kernel nests
|
||||||
|
* callback_mutex inside lock_cpu_hotplug() calls. So the reverse
|
||||||
|
* nesting would risk an ABBA deadlock.
|
||||||
|
*/
|
||||||
|
|
||||||
static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
|
static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
|
||||||
{
|
{
|
||||||
struct cpuset *cs = dentry->d_fsdata;
|
struct cpuset *cs = dentry->d_fsdata;
|
||||||
|
@ -1936,11 +1949,16 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
|
||||||
mutex_unlock(&manage_mutex);
|
mutex_unlock(&manage_mutex);
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
|
if (is_cpu_exclusive(cs)) {
|
||||||
|
int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0");
|
||||||
|
if (retval < 0) {
|
||||||
|
mutex_unlock(&manage_mutex);
|
||||||
|
return retval;
|
||||||
|
}
|
||||||
|
}
|
||||||
parent = cs->parent;
|
parent = cs->parent;
|
||||||
mutex_lock(&callback_mutex);
|
mutex_lock(&callback_mutex);
|
||||||
set_bit(CS_REMOVED, &cs->flags);
|
set_bit(CS_REMOVED, &cs->flags);
|
||||||
if (is_cpu_exclusive(cs))
|
|
||||||
update_cpu_domains(cs);
|
|
||||||
list_del(&cs->sibling); /* delete my sibling from parent->children */
|
list_del(&cs->sibling); /* delete my sibling from parent->children */
|
||||||
spin_lock(&cs->dentry->d_lock);
|
spin_lock(&cs->dentry->d_lock);
|
||||||
d = dget(cs->dentry);
|
d = dget(cs->dentry);
|
||||||
|
|
Loading…
Reference in New Issue