memcg: avoid deadlock caused by race between oom and cpuset_attach
mpol_rebind_mm(), which can be called from cpuset_attach(), does
down_write(mm->mmap_sem).  This means down_write(mm->mmap_sem) can be
called under cgroup_mutex.

OTOH, the page fault path does down_read(mm->mmap_sem) and calls
mem_cgroup_try_charge_xxx(), which may eventually call
mem_cgroup_out_of_memory(), and mem_cgroup_out_of_memory() calls
cgroup_lock().  This means cgroup_lock() can be called under
down_read(mm->mmap_sem).

If those two paths race, a deadlock can happen.

This patch avoids the deadlock by:

- removing cgroup_lock() from mem_cgroup_out_of_memory().
- defining a new mutex (memcg_tasklist) and serializing mem_cgroup_move_task()
  (the ->attach handler of the memory cgroup) and mem_cgroup_out_of_memory().

Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 7f4d454dee
parent a5e924f5f8
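To make the lock-order problem concrete before reading the diff, here is a minimal userspace sketch of the inversion described in the commit message, using pthreads as stand-ins: one mutex for cgroup_mutex, one rwlock for mm->mmap_sem, and a second mutex playing the role of the new memcg_tasklist. All names ending in _sim and both thread functions are invented for illustration and are not kernel code; the point is only that, after the change, the OOM path nests a dedicated mutex (never cgroup_mutex) inside the read-held mmap_sem, so no AB-BA cycle remains.

/*
 * Userspace sketch of the AB-BA inversion fixed by this commit.
 * Names ending in _sim are invented stand-ins, not kernel symbols.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cgroup_mutex_sim = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t mmap_sem_sim = PTHREAD_RWLOCK_INITIALIZER;
/* the fix: a dedicated mutex taken on the OOM path instead of cgroup_mutex */
static pthread_mutex_t memcg_tasklist_sim = PTHREAD_MUTEX_INITIALIZER;

/* cpuset_attach()-like path: cgroup_mutex -> down_write(mmap_sem) */
static void *attach_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&cgroup_mutex_sim);
	pthread_rwlock_wrlock(&mmap_sem_sim);	/* mpol_rebind_mm() */
	pthread_rwlock_unlock(&mmap_sem_sim);
	pthread_mutex_unlock(&cgroup_mutex_sim);
	return NULL;
}

/* page-fault path: down_read(mmap_sem) -> memcg OOM handling */
static void *fault_path(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&mmap_sem_sim);
	/*
	 * Before the patch this inner lock was cgroup_mutex (via cgroup_lock()),
	 * i.e. the opposite acquisition order to attach_path(), so the two
	 * threads could deadlock.  After the patch the OOM path takes the
	 * dedicated memcg_tasklist mutex instead, which is never taken in the
	 * reverse order with respect to mmap_sem.
	 */
	pthread_mutex_lock(&memcg_tasklist_sim);	/* mem_cgroup_out_of_memory() */
	pthread_mutex_unlock(&memcg_tasklist_sim);
	pthread_rwlock_unlock(&mmap_sem_sim);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, attach_path, NULL);
	pthread_create(&b, NULL, fault_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("no deadlock: the OOM path no longer nests cgroup_mutex inside mmap_sem");
	return 0;
}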
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -51,6 +51,7 @@ static int really_do_swap_account __initdata = 1; /* for remember boot option*/
 #define do_swap_account		(0)
 #endif
 
+static DEFINE_MUTEX(memcg_tasklist);	/* can be hold under cgroup_mutex */
 
 /*
  * Statistics for memory cgroup.
@@ -827,7 +828,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 
 		if (!nr_retries--) {
 			if (oom) {
+				mutex_lock(&memcg_tasklist);
 				mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
+				mutex_unlock(&memcg_tasklist);
 				mem_over_limit->last_oom_jiffies = jiffies;
 			}
 			goto nomem;
@@ -2211,10 +2214,12 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 				struct cgroup *old_cont,
 				struct task_struct *p)
 {
+	mutex_lock(&memcg_tasklist);
 	/*
 	 * FIXME: It's better to move charges of this process from old
 	 * memcg to new memcg. But it's just on TODO-List now.
 	 */
+	mutex_unlock(&memcg_tasklist);
 }
 
 struct cgroup_subsys mem_cgroup_subsys = {
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -429,7 +429,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
 	unsigned long points = 0;
 	struct task_struct *p;
 
-	cgroup_lock();
 	read_lock(&tasklist_lock);
 retry:
 	p = select_bad_process(&points, mem);
@@ -444,7 +443,6 @@ retry:
 		goto retry;
 out:
 	read_unlock(&tasklist_lock);
-	cgroup_unlock();
 }
 #endif
 