oom: suppress nodes that are not allowed from meminfo on oom kill
The oom killer is extremely verbose for machines with a large number of cpus and/or nodes. This verbosity can often be harmful if it causes other important messages to be scrolled from the kernel log and incurs a significant time delay, specifically for kernels with CONFIG_NODES_SHIFT > 8.

This patch causes only memory information to be displayed for nodes that are allowed by current's cpuset when dumping the VM state. Information for all other nodes is irrelevant to the oom condition; we don't care if there's an abundance of memory elsewhere if we can't access it.

This only affects the behavior of dumping memory information when an oom is triggered. Other dumps, such as for sysrq+m, still display the unfiltered form when using the existing show_mem() interface.

Additionally, the per-cpu pageset statistics are extremely verbose in oom killer output, so they are now suppressed. This removes

	nodes_weight(current->mems_allowed) * (1 + nr_cpus)

lines from the oom killer output.

Callers may use __show_mem(SHOW_MEM_FILTER_NODES) to filter disallowed nodes.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit ddd588b5dd (parent 94dcf29a11)
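To make the intended split between callers concrete, here is a brief illustrative sketch (not part of the patch) contrasting the two entry points the diff below introduces; both report functions are hypothetical names used only for this example. Applying the formula from the changelog, a task allowed 4 nodes on a 64-cpu machine would, for instance, shed 4 * (1 + 64) = 260 lines of output.

/*
 * Illustrative sketch only: how callers are expected to choose between
 * the filtered and unfiltered dumps after this patch. Both wrapper
 * functions below are hypothetical names used just for this example.
 */
#include <linux/mm.h>

static void report_oom_memory_state(void)
{
	/* OOM path: only print nodes allowed by current's cpuset */
	__show_mem(SHOW_MEM_FILTER_NODES);
}

static void report_full_memory_state(void)
{
	/* sysrq+m style dump: unfiltered, equivalent to __show_mem(0) */
	show_mem();
}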
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -859,7 +859,14 @@ extern void pagefault_out_of_memory(void);
 
 #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
 
+/*
+ * Flags passed to __show_mem() and __show_free_areas() to suppress output in
+ * various contexts.
+ */
+#define SHOW_MEM_FILTER_NODES	(0x0001u)	/* filter disallowed nodes */
+
 extern void show_free_areas(void);
+extern void __show_free_areas(unsigned int flags);
 
 int shmem_lock(struct file *file, int lock, struct user_struct *user);
 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
@@ -1348,6 +1355,7 @@ extern void calculate_zone_inactive_ratio(struct zone *zone);
 extern void mem_init(void);
 extern void __init mmap_init(void);
 extern void show_mem(void);
+extern void __show_mem(unsigned int flags);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 extern int after_bootmem;
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -9,14 +9,14 @@
 #include <linux/nmi.h>
 #include <linux/quicklist.h>
 
-void show_mem(void)
+void __show_mem(unsigned int filter)
 {
 	pg_data_t *pgdat;
 	unsigned long total = 0, reserved = 0, shared = 0,
 		nonshared = 0, highmem = 0;
 
 	printk("Mem-Info:\n");
-	show_free_areas();
+	__show_free_areas(filter);
 
 	for_each_online_pgdat(pgdat) {
 		unsigned long i, flags;
@@ -61,3 +61,8 @@ void show_mem(void)
 		quicklist_total_size());
 #endif
 }
+
+void show_mem(void)
+{
+	__show_mem(0);
+}
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -406,7 +406,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 	task_unlock(current);
 	dump_stack();
 	mem_cgroup_print_oom_info(mem, p);
-	show_mem();
+	__show_mem(SHOW_MEM_FILTER_NODES);
 	if (sysctl_oom_dump_tasks)
 		dump_tasks(mem, nodemask);
 }
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2411,19 +2411,42 @@ void si_meminfo_node(struct sysinfo *val, int nid)
 }
 #endif
 
+/*
+ * Determine whether the zone's node should be displayed or not, depending on
+ * whether SHOW_MEM_FILTER_NODES was passed to __show_free_areas().
+ */
+static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone)
+{
+	bool ret = false;
+
+	if (!(flags & SHOW_MEM_FILTER_NODES))
+		goto out;
+
+	get_mems_allowed();
+	ret = !node_isset(zone->zone_pgdat->node_id,
+				cpuset_current_mems_allowed);
+	put_mems_allowed();
+out:
+	return ret;
+}
+
 #define K(x) ((x) << (PAGE_SHIFT-10))
 
 /*
  * Show free area list (used inside shift_scroll-lock stuff)
  * We also calculate the percentage fragmentation. We do this by counting the
  * memory on each free list with the exception of the first item on the list.
+ * Suppresses nodes that are not allowed by current's cpuset if
+ * SHOW_MEM_FILTER_NODES is passed.
  */
-void show_free_areas(void)
+void __show_free_areas(unsigned int filter)
 {
 	int cpu;
 	struct zone *zone;
 
 	for_each_populated_zone(zone) {
+		if (skip_free_areas_zone(filter, zone))
+			continue;
 		show_node(zone);
 		printk("%s per-cpu:\n", zone->name);
 
@@ -2465,6 +2488,8 @@ void show_free_areas(void)
 	for_each_populated_zone(zone) {
 		int i;
 
+		if (skip_free_areas_zone(filter, zone))
+			continue;
 		show_node(zone);
 		printk("%s"
 			" free:%lukB"
@@ -2532,6 +2557,8 @@ void show_free_areas(void)
 	for_each_populated_zone(zone) {
 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
 
+		if (skip_free_areas_zone(filter, zone))
+			continue;
 		show_node(zone);
 		printk("%s: ", zone->name);
 
@@ -2551,6 +2578,11 @@ void show_free_areas(void)
 	show_swap_cache_info();
 }
 
+void show_free_areas(void)
+{
+	__show_free_areas(0);
+}
+
 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
 {
 	zoneref->zone = zone;
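As a usage note, the helper added in the mm/page_alloc.c hunks is static to that file, so the pattern below is a minimal sketch of how a zone-walking report inside mm/page_alloc.c consumes it, mirroring the three loops in __show_free_areas(); my_zone_report() is a hypothetical name used only for illustration.

/*
 * Illustrative sketch, not part of the patch: the consumption pattern used
 * by __show_free_areas() above. my_zone_report() is a hypothetical caller
 * and would need to live in mm/page_alloc.c, where the helper is static.
 */
static void my_zone_report(unsigned int filter)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		/* Drop zones on nodes outside current's cpuset when filtering */
		if (skip_free_areas_zone(filter, zone))
			continue;
		show_node(zone);
		printk("%s: %lu free pages\n", zone->name,
			zone_page_state(zone, NR_FREE_PAGES));
	}
}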