2006-10-20 14:28:32 +08:00
|
|
|
#ifndef __INCLUDE_LINUX_OOM_H
|
|
|
|
#define __INCLUDE_LINUX_OOM_H
|
|
|
|
|
2007-10-17 14:25:53 +08:00
|
|
|
|
oom: badness heuristic rewrite
This a complete rewrite of the oom killer's badness() heuristic which is
used to determine which task to kill in oom conditions. The goal is to
make it as simple and predictable as possible so the results are better
understood and we end up killing the task which will lead to the most
memory freeing while still respecting the fine-tuning from userspace.
Instead of basing the heuristic on mm->total_vm for each task, the task's
rss and swap space is used instead. This is a better indication of the
amount of memory that will be freeable if the oom killed task is chosen
and subsequently exits. This helps specifically in cases where KDE or
GNOME is chosen for oom kill on desktop systems instead of a memory
hogging task.
The baseline for the heuristic is a proportion of memory that each task is
currently using in memory plus swap compared to the amount of "allowable"
memory. "Allowable," in this sense, means the system-wide resources for
unconstrained oom conditions, the set of mempolicy nodes, the mems
attached to current's cpuset, or a memory controller's limit. The
proportion is given on a scale of 0 (never kill) to 1000 (always kill),
roughly meaning that if a task has a badness() score of 500 that the task
consumes approximately 50% of allowable memory resident in RAM or in swap
space.
The proportion is always relative to the amount of "allowable" memory and
not the total amount of RAM systemwide so that mempolicies and cpusets may
operate in isolation; they shall not need to know the true size of the
machine on which they are running if they are bound to a specific set of
nodes or mems, respectively.
Root tasks are given 3% extra memory just like __vm_enough_memory()
provides in LSMs. In the event of two tasks consuming similar amounts of
memory, it is generally better to save root's task.
Because of the change in the badness() heuristic's baseline, it is also
necessary to introduce a new user interface to tune it. It's not possible
to redefine the meaning of /proc/pid/oom_adj with a new scale since the
ABI cannot be changed for backward compatibility. Instead, a new tunable,
/proc/pid/oom_score_adj, is added that ranges from -1000 to +1000. It may
be used to polarize the heuristic such that certain tasks are never
considered for oom kill while others may always be considered. The value
is added directly into the badness() score so a value of -500, for
example, means to discount 50% of its memory consumption in comparison to
other tasks either on the system, bound to the mempolicy, in the cpuset,
or sharing the same memory controller.
/proc/pid/oom_adj is changed so that its meaning is rescaled into the
units used by /proc/pid/oom_score_adj, and vice versa. Changing one of
these per-task tunables will rescale the value of the other to an
equivalent meaning. Although /proc/pid/oom_adj was originally defined as
a bitshift on the badness score, it now shares the same linear growth as
/proc/pid/oom_score_adj but with different granularity. This is required
so the ABI is not broken with userspace applications and allows oom_adj to
be deprecated for future removal.
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2010-08-10 08:19:46 +08:00
|
|
|
#include <linux/sched.h>
|
2007-10-17 14:25:59 +08:00
|
|
|
#include <linux/types.h>
|
2009-12-16 08:45:33 +08:00
|
|
|
#include <linux/nodemask.h>
|
2012-10-13 17:46:48 +08:00
|
|
|
#include <uapi/linux/oom.h>
|
2007-10-17 14:25:59 +08:00
|
|
|
|
|
|
|
struct zonelist;
|
|
|
|
struct notifier_block;
|
2010-08-10 08:19:43 +08:00
|
|
|
struct mem_cgroup;
|
|
|
|
struct task_struct;
|
2007-10-17 14:25:59 +08:00
|
|
|
|
2015-09-09 06:00:44 +08:00
|
|
|
/*
 * Details of the page allocation that triggered the oom killer that are used to
 * determine what should be killed.
 */
struct oom_control {
	/* Used to determine cpuset */
	struct zonelist *zonelist;

	/* Used to determine mempolicy */
	nodemask_t *nodemask;

	/* Memory cgroup in which oom is invoked, or NULL for global oom */
	struct mem_cgroup *memcg;

	/* Used to determine cpuset and node locality requirement */
	const gfp_t gfp_mask;

	/*
	 * order == -1 means the oom kill is required by sysrq, otherwise only
	 * for display purposes.
	 */
	const int order;

	/* Used by oom implementation, do not set */
	unsigned long totalpages;	/* "allowable" memory the scan is scaled against */
	struct task_struct *chosen;	/* victim selected by the oom killer */
	unsigned long chosen_points;	/* badness score of the chosen task */
};
|
|
|
|
|
2015-06-25 07:57:19 +08:00
|
|
|
/*
 * Serializes oom-killer invocations.  NOTE(review): lock/unlock sites live in
 * mm/oom_kill.c and are not visible here — confirm the exact protocol there.
 */
extern struct mutex oom_lock;
|
|
|
|
|
2012-12-12 08:02:56 +08:00
|
|
|
/*
 * set_current_oom_origin - flag the calling task as an oom "origin".
 *
 * Sets the per-signal_struct oom_flag_origin flag on current.
 * NOTE(review): consumers read it via oom_task_origin(); presumably the oom
 * killer prefers such tasks as victims — confirm in mm/oom_kill.c.
 */
static inline void set_current_oom_origin(void)
{
	current->signal->oom_flag_origin = true;
}
|
|
|
|
|
|
|
|
/*
 * clear_current_oom_origin - undo set_current_oom_origin() for current.
 */
static inline void clear_current_oom_origin(void)
{
	current->signal->oom_flag_origin = false;
}
|
|
|
|
|
|
|
|
/*
 * oom_task_origin - has @p been flagged as an oom origin?
 * @p: task to test
 *
 * Returns the per-signal oom_flag_origin value set by
 * set_current_oom_origin().
 */
static inline bool oom_task_origin(const struct task_struct *p)
{
	return p->signal->oom_flag_origin;
}
|
2011-05-25 08:11:40 +08:00
|
|
|
|
2016-10-08 07:58:57 +08:00
|
|
|
static inline bool tsk_is_oom_victim(struct task_struct * tsk)
|
|
|
|
{
|
|
|
|
return tsk->signal->oom_mm;
|
|
|
|
}
|
|
|
|
|
2012-05-30 06:06:47 +08:00
|
|
|
/*
 * oom_badness - heuristic score of how attractive an oom-kill victim @p is.
 * @p:          task to score
 * @memcg:      memory cgroup of a memcg-constrained oom, or NULL
 * @nodemask:   nodes of a mempolicy-constrained oom, or NULL
 * @totalpages: amount of "allowable" memory the score is scaled against
 *
 * Higher return values mean a more desirable victim; the score is based on
 * the task's rss and swap usage as a proportion of @totalpages (see the
 * heuristic description in the history above).
 */
extern unsigned long oom_badness(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask,
		unsigned long totalpages);
|
2014-10-21 00:12:32 +08:00
|
|
|
|
2015-09-09 06:00:36 +08:00
|
|
|
extern bool out_of_memory(struct oom_control *oc);
|
2015-06-25 07:57:07 +08:00
|
|
|
|
2016-03-26 05:20:27 +08:00
|
|
|
extern void exit_oom_victim(struct task_struct *tsk);
|
2015-06-25 07:57:07 +08:00
|
|
|
|
2007-10-17 14:25:53 +08:00
|
|
|
/*
 * Register/unregister a callback on the oom notifier chain; both return the
 * notifier-chain status code.  NOTE(review): the callback contract (argument
 * meaning, when it fires) is not visible here — see mm/oom_kill.c.
 */
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
|
|
|
|
|
2016-10-08 07:59:00 +08:00
|
|
|
/*
 * Disable/re-enable the oom killer.  NOTE(review): @timeout and the boolean
 * return of oom_killer_disable() are inferred from the signature (likely a
 * bounded wait for in-flight victims) — confirm semantics in mm/oom_kill.c.
 */
extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);
|
2010-08-10 08:18:56 +08:00
|
|
|
|
2010-08-11 09:03:00 +08:00
|
|
|
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
|
|
|
|
|
2010-08-10 08:18:56 +08:00
|
|
|
/*
 * sysctls — tunables exposed under /proc/sys/vm/.  Names suggest their
 * semantics; the authoritative descriptions are in Documentation/sysctl/vm
 * rather than here.
 */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
|
2007-10-17 14:25:53 +08:00
|
|
|
#endif /* __INCLUDE_LINUX_OOM_H */
|