#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H

#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;

/*
 * Details of the page allocation that triggered the oom killer that are used to
 * determine what should be killed.
 */
struct oom_control {
        /* Used to determine cpuset */
        struct zonelist *zonelist;

        /* Used to determine mempolicy */
        nodemask_t *nodemask;

        /* Memory cgroup in which oom is invoked, or NULL for global oom */
        struct mem_cgroup *memcg;

        /* Used to determine cpuset and node locality requirement */
        const gfp_t gfp_mask;

        /*
         * order == -1 means the oom kill is required by sysrq; otherwise the
         * order is used only for display purposes.
         */
        const int order;

        /* Used by oom implementation, do not set */
        unsigned long totalpages;
        struct task_struct *chosen;
        unsigned long chosen_points;
};

extern struct mutex oom_lock;

static inline void set_current_oom_origin(void)
{
        current->signal->oom_flag_origin = true;
}

static inline void clear_current_oom_origin(void)
{
        current->signal->oom_flag_origin = false;
}

static inline bool oom_task_origin(const struct task_struct *p)
{
        return p->signal->oom_flag_origin;
}
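
/*
 * Illustrative sketch, not part of this header: a path that may pin a
 * large amount of memory, such as swapoff, can mark itself as the
 * preferred oom victim for the duration of the operation
 * (do_heavy_allocation() is a hypothetical placeholder):
 *
 *        set_current_oom_origin();
 *        err = do_heavy_allocation();
 *        clear_current_oom_origin();
 *
 * The oom killer consults oom_task_origin() and selects such a task ahead
 * of the normal badness heuristic.
 */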
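
/*
 * Returns true once tsk has been marked as an oom victim: mark_oom_victim()
 * records the victim's mm in signal->oom_mm, which then stays set for the
 * remainder of the task's lifetime.
 */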
static inline bool tsk_is_oom_victim(struct task_struct *tsk)
{
        return tsk->signal->oom_mm;
}
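
/*
 * Heuristic "badness" score of @p for the given memcg/nodemask context,
 * scaled against @totalpages; the eligible task with the highest score is
 * selected for killing.
 */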
extern unsigned long oom_badness(struct task_struct *p,
                struct mem_cgroup *memcg, const nodemask_t *nodemask,
                unsigned long totalpages);

extern bool out_of_memory(struct oom_control *oc);
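
/*
 * Illustrative sketch of an invocation, loosely modeled on the page
 * allocator's oom path; the local variables are hypothetical, and .memcg
 * is left NULL to request a global (non-memcg) oom:
 *
 *        struct oom_control oc = {
 *                .zonelist = zonelist,
 *                .nodemask = nodemask,
 *                .memcg = NULL,
 *                .gfp_mask = gfp_mask,
 *                .order = order,
 *        };
 *
 *        mutex_lock(&oom_lock);
 *        out_of_memory(&oc);
 *        mutex_unlock(&oom_lock);
 *
 * Callers serialize behind oom_lock so that only one oom kill is in
 * flight at a time.
 */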

extern void exit_oom_victim(void);

extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
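
/*
 * Illustrative sketch of an oom notifier; my_oom_notify, my_shrink_caches
 * and my_oom_nb are hypothetical names. Registered callbacks run before a
 * victim is selected and report the number of pages they freed through the
 * pointer argument, giving subsystems a chance to avert the kill:
 *
 *        static int my_oom_notify(struct notifier_block *nb,
 *                                 unsigned long unused, void *freed)
 *        {
 *                unsigned long *pages_freed = freed;
 *
 *                *pages_freed += my_shrink_caches();
 *                return NOTIFY_OK;
 *        }
 *
 *        static struct notifier_block my_oom_nb = {
 *                .notifier_call = my_oom_notify,
 *        };
 *
 *        register_oom_notifier(&my_oom_nb);
 */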

extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);
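
/*
 * Illustrative sketch: the suspend/hibernation path disables the oom
 * killer while tasks are frozen and re-enables it on thaw; timeout_msecs
 * is a hypothetical value. oom_killer_disable() returns false if a
 * pending oom victim fails to exit within the timeout:
 *
 *        if (!oom_killer_disable(msecs_to_jiffies(timeout_msecs)))
 *                return -EBUSY;
 *        ...
 *        oom_killer_enable();
 */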

extern struct task_struct *find_lock_task_mm(struct task_struct *p);

/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;

#endif /* __INCLUDE_LINUX_OOM_H */