mm: memcontrol: consolidate memory controller initialization
The initialization code for the per-cpu charge stock and the soft limit tree is compact enough to be inlined into mem_cgroup_init().

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9c608dbe6a
commit 95a045f63d
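In outline, the patch deletes two single-use __init helpers and moves their loop bodies into the memcg initcall. A condensed before/after sketch of mem_cgroup_init() follows (two snapshots of the same function, not one compilable unit; the for_each_node() body is elided here and shown in full in the last hunk below):

/* Before: setup split across two one-off __init helpers */
static int __init mem_cgroup_init(void)
{
	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
	enable_swap_cgroup();
	mem_cgroup_soft_limit_tree_init();	/* deleted below */
	memcg_stock_init();			/* deleted below */
	return 0;
}

/* After: both loops live directly in the initcall */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);

	/* was memcg_stock_init() */
	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	/* was mem_cgroup_soft_limit_tree_init(): allocates each node's
	 * tree and initializes the per-zone rb-roots and locks */
	for_each_node(node) {
		/* body elided, see the last hunk */
	}

	enable_swap_cgroup();

	return 0;
}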
mm/memcontrol.c

@@ -2138,17 +2138,6 @@ static void drain_local_stock(struct work_struct *dummy)
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
 
-static void __init memcg_stock_init(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct memcg_stock_pcp *stock =
-					&per_cpu(memcg_stock, cpu);
-		INIT_WORK(&stock->work, drain_local_stock);
-	}
-}
-
 /*
  * Cache charges(val) to local per_cpu area.
  * This will be consumed by consume_stock() function, later.
@@ -4507,28 +4496,6 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 }
 EXPORT_SYMBOL(parent_mem_cgroup);
 
-static void __init mem_cgroup_soft_limit_tree_init(void)
-{
-	int node;
-
-	for_each_node(node) {
-		struct mem_cgroup_tree_per_node *rtpn;
-		int zone;
-
-		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
-				    node_online(node) ? node : NUMA_NO_NODE);
-
-		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
-			struct mem_cgroup_tree_per_zone *rtpz;
-
-			rtpz = &rtpn->rb_tree_per_zone[zone];
-			rtpz->rb_root = RB_ROOT;
-			spin_lock_init(&rtpz->lock);
-		}
-		soft_limit_tree.rb_tree_per_node[node] = rtpn;
-	}
-}
-
 static struct cgroup_subsys_state * __ref
 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
@@ -5905,10 +5872,33 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
  */
 static int __init mem_cgroup_init(void)
 {
+	int cpu, node;
+
 	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
+
+	for_each_possible_cpu(cpu)
+		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+			  drain_local_stock);
+
+	for_each_node(node) {
+		struct mem_cgroup_tree_per_node *rtpn;
+		int zone;
+
+		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
+				    node_online(node) ? node : NUMA_NO_NODE);
+
+		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+			struct mem_cgroup_tree_per_zone *rtpz;
+
+			rtpz = &rtpn->rb_tree_per_zone[zone];
+			rtpz->rb_root = RB_ROOT;
+			spin_lock_init(&rtpz->lock);
+		}
+		soft_limit_tree.rb_tree_per_node[node] = rtpn;
+	}
+
 	enable_swap_cgroup();
-	mem_cgroup_soft_limit_tree_init();
-	memcg_stock_init();
+
 	return 0;
 }
 subsys_initcall(mem_cgroup_init);
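One detail worth noting: the deleted memcg_stock_init() reached each CPU's stock as &per_cpu(memcg_stock, cpu), while the inlined loop writes per_cpu_ptr(&memcg_stock, cpu). Both name the same per-cpu slot. A minimal standalone sketch of that equivalence (the struct fields are trimmed and the helper name is hypothetical, not part of this patch):

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/workqueue.h>
#include <linux/bug.h>

/* stand-in for the real struct in mm/memcontrol.c; other fields elided */
struct memcg_stock_pcp {
	struct work_struct work;
};

static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);

/* compare_percpu_accessors(): hypothetical helper for illustration only */
static void __init compare_percpu_accessors(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* style of the deleted helper: lvalue accessor, then & */
		struct memcg_stock_pcp *a = &per_cpu(memcg_stock, cpu);
		/* style of the inlined loop: pointer accessor */
		struct memcg_stock_pcp *b = per_cpu_ptr(&memcg_stock, cpu);

		BUG_ON(a != b);	/* same per-cpu instance either way */
	}
}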