memcg: cpu hotplug aware quick account_move detection
An event counter MEM_CGROUP_ON_MOVE is used for a quick check of whether a file stat update can be done in an async manner or not. Currently, it uses a percpu counter and for_each_possible_cpu to update. This patch replaces for_each_possible_cpu with for_each_online_cpu and adds the necessary synchronization logic at CPU HOTPLUG. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Balbir Singh <balbir@in.ibm.com> Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
711d3d2c9b
commit
1489ebad8b
|
@ -1132,11 +1132,14 @@ static unsigned int get_swappiness(struct mem_cgroup *memcg)
|
||||||
static void mem_cgroup_start_move(struct mem_cgroup *mem)
|
static void mem_cgroup_start_move(struct mem_cgroup *mem)
|
||||||
{
|
{
|
||||||
int cpu;
|
int cpu;
|
||||||
/* Because this is for moving account, reuse mc.lock */
|
|
||||||
spin_lock(&mc.lock);
|
get_online_cpus();
|
||||||
for_each_possible_cpu(cpu)
|
spin_lock(&mem->pcp_counter_lock);
|
||||||
|
for_each_online_cpu(cpu)
|
||||||
per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
|
per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
|
||||||
spin_unlock(&mc.lock);
|
mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
|
||||||
|
spin_unlock(&mem->pcp_counter_lock);
|
||||||
|
put_online_cpus();
|
||||||
|
|
||||||
synchronize_rcu();
|
synchronize_rcu();
|
||||||
}
|
}
|
||||||
|
@ -1147,10 +1150,13 @@ static void mem_cgroup_end_move(struct mem_cgroup *mem)
|
||||||
|
|
||||||
if (!mem)
|
if (!mem)
|
||||||
return;
|
return;
|
||||||
spin_lock(&mc.lock);
|
get_online_cpus();
|
||||||
for_each_possible_cpu(cpu)
|
spin_lock(&mem->pcp_counter_lock);
|
||||||
|
for_each_online_cpu(cpu)
|
||||||
per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
|
per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
|
||||||
spin_unlock(&mc.lock);
|
mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
|
||||||
|
spin_unlock(&mem->pcp_counter_lock);
|
||||||
|
put_online_cpus();
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
* 2 routines for checking "mem" is under move_account() or not.
|
* 2 routines for checking "mem" is under move_account() or not.
|
||||||
|
@ -1751,6 +1757,17 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
|
||||||
per_cpu(mem->stat->count[i], cpu) = 0;
|
per_cpu(mem->stat->count[i], cpu) = 0;
|
||||||
mem->nocpu_base.count[i] += x;
|
mem->nocpu_base.count[i] += x;
|
||||||
}
|
}
|
||||||
|
/* need to clear ON_MOVE value, works as a kind of lock. */
|
||||||
|
per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
|
||||||
|
spin_unlock(&mem->pcp_counter_lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
|
||||||
|
{
|
||||||
|
int idx = MEM_CGROUP_ON_MOVE;
|
||||||
|
|
||||||
|
spin_lock(&mem->pcp_counter_lock);
|
||||||
|
per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
|
||||||
spin_unlock(&mem->pcp_counter_lock);
|
spin_unlock(&mem->pcp_counter_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1762,6 +1779,12 @@ static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
|
||||||
struct memcg_stock_pcp *stock;
|
struct memcg_stock_pcp *stock;
|
||||||
struct mem_cgroup *iter;
|
struct mem_cgroup *iter;
|
||||||
|
|
||||||
|
if ((action == CPU_ONLINE)) {
|
||||||
|
for_each_mem_cgroup_all(iter)
|
||||||
|
synchronize_mem_cgroup_on_move(iter, cpu);
|
||||||
|
return NOTIFY_OK;
|
||||||
|
}
|
||||||
|
|
||||||
if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN)
|
if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN)
|
||||||
return NOTIFY_OK;
|
return NOTIFY_OK;
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue