mm, oom: avoid looping when chosen thread detaches its mm
oom_kill_task() returns non-zero iff the chosen process does not have any threads with an attached ->mm. In such a case, it's better to just return to the page allocator and retry the allocation because memory could have been freed in the interim and the oom condition may no longer exist. It's unnecessary to loop in the oom killer and find another thread to kill. This allows both oom_kill_task() and oom_kill_process() to be converted to void functions. If the oom condition persists, the oom killer will be recalled.

Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
ce24d8a142
commit
2a1c9b1fc0
|
@ -434,14 +434,14 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
|
||||||
}
|
}
|
||||||
|
|
||||||
#define K(x) ((x) << (PAGE_SHIFT-10))
|
#define K(x) ((x) << (PAGE_SHIFT-10))
|
||||||
static int oom_kill_task(struct task_struct *p)
|
static void oom_kill_task(struct task_struct *p)
|
||||||
{
|
{
|
||||||
struct task_struct *q;
|
struct task_struct *q;
|
||||||
struct mm_struct *mm;
|
struct mm_struct *mm;
|
||||||
|
|
||||||
p = find_lock_task_mm(p);
|
p = find_lock_task_mm(p);
|
||||||
if (!p)
|
if (!p)
|
||||||
return 1;
|
return;
|
||||||
|
|
||||||
/* mm cannot be safely dereferenced after task_unlock(p) */
|
/* mm cannot be safely dereferenced after task_unlock(p) */
|
||||||
mm = p->mm;
|
mm = p->mm;
|
||||||
|
@ -477,12 +477,10 @@ static int oom_kill_task(struct task_struct *p)
|
||||||
|
|
||||||
set_tsk_thread_flag(p, TIF_MEMDIE);
|
set_tsk_thread_flag(p, TIF_MEMDIE);
|
||||||
force_sig(SIGKILL, p);
|
force_sig(SIGKILL, p);
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
#undef K
|
#undef K
|
||||||
|
|
||||||
static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
|
static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
|
||||||
unsigned int points, unsigned long totalpages,
|
unsigned int points, unsigned long totalpages,
|
||||||
struct mem_cgroup *memcg, nodemask_t *nodemask,
|
struct mem_cgroup *memcg, nodemask_t *nodemask,
|
||||||
const char *message)
|
const char *message)
|
||||||
|
@ -501,7 +499,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
|
||||||
*/
|
*/
|
||||||
if (p->flags & PF_EXITING) {
|
if (p->flags & PF_EXITING) {
|
||||||
set_tsk_thread_flag(p, TIF_MEMDIE);
|
set_tsk_thread_flag(p, TIF_MEMDIE);
|
||||||
return 0;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
task_lock(p);
|
task_lock(p);
|
||||||
|
@ -533,7 +531,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
|
||||||
}
|
}
|
||||||
} while_each_thread(p, t);
|
} while_each_thread(p, t);
|
||||||
|
|
||||||
return oom_kill_task(victim);
|
oom_kill_task(victim);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -580,15 +578,10 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask)
|
||||||
check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
|
check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
|
||||||
limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
|
limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
|
||||||
read_lock(&tasklist_lock);
|
read_lock(&tasklist_lock);
|
||||||
retry:
|
|
||||||
p = select_bad_process(&points, limit, memcg, NULL);
|
p = select_bad_process(&points, limit, memcg, NULL);
|
||||||
if (!p || PTR_ERR(p) == -1UL)
|
if (p && PTR_ERR(p) != -1UL)
|
||||||
goto out;
|
oom_kill_process(p, gfp_mask, 0, points, limit, memcg, NULL,
|
||||||
|
"Memory cgroup out of memory");
|
||||||
if (oom_kill_process(p, gfp_mask, 0, points, limit, memcg, NULL,
|
|
||||||
"Memory cgroup out of memory"))
|
|
||||||
goto retry;
|
|
||||||
out:
|
|
||||||
read_unlock(&tasklist_lock);
|
read_unlock(&tasklist_lock);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
@ -745,33 +738,24 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
|
||||||
if (sysctl_oom_kill_allocating_task &&
|
if (sysctl_oom_kill_allocating_task &&
|
||||||
!oom_unkillable_task(current, NULL, nodemask) &&
|
!oom_unkillable_task(current, NULL, nodemask) &&
|
||||||
current->mm) {
|
current->mm) {
|
||||||
/*
|
oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
|
||||||
* oom_kill_process() needs tasklist_lock held. If it returns
|
nodemask,
|
||||||
* non-zero, current could not be killed so we must fallback to
|
"Out of memory (oom_kill_allocating_task)");
|
||||||
* the tasklist scan.
|
|
||||||
*/
|
|
||||||
if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
|
|
||||||
NULL, nodemask,
|
|
||||||
"Out of memory (oom_kill_allocating_task)"))
|
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
retry:
|
|
||||||
p = select_bad_process(&points, totalpages, NULL, mpol_mask);
|
p = select_bad_process(&points, totalpages, NULL, mpol_mask);
|
||||||
if (PTR_ERR(p) == -1UL)
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
/* Found nothing?!?! Either we hang forever, or we panic. */
|
/* Found nothing?!?! Either we hang forever, or we panic. */
|
||||||
if (!p) {
|
if (!p) {
|
||||||
dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
|
dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
|
||||||
read_unlock(&tasklist_lock);
|
read_unlock(&tasklist_lock);
|
||||||
panic("Out of memory and no killable processes...\n");
|
panic("Out of memory and no killable processes...\n");
|
||||||
}
|
}
|
||||||
|
if (PTR_ERR(p) != -1UL) {
|
||||||
if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
|
oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
|
||||||
nodemask, "Out of memory"))
|
nodemask, "Out of memory");
|
||||||
goto retry;
|
|
||||||
killed = 1;
|
killed = 1;
|
||||||
|
}
|
||||||
out:
|
out:
|
||||||
read_unlock(&tasklist_lock);
|
read_unlock(&tasklist_lock);
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue