memcgroup: tidy up mem_cgroup_charge_common
Tidy up mem_cgroup_charge_common before extending it.  Adjust some
comments, but mainly clean up its loop: I've an aversion to loops full of
continues, then a break or a goto at the bottom.  And the is_atomic test
should be on the __GFP_WAIT bit, not GFP_ATOMIC bits.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 3be91277e7
parent ac44d354d5
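The gfp point deserves a note: in this era GFP_ATOMIC is defined as
__GFP_HIGH ("may use emergency reserves"), so the old test
"is_atomic = gfp_mask & GFP_ATOMIC" checked the wrong bit entirely;
whether the charge path may reclaim depends on whether the allocation
is allowed to sleep, i.e. on __GFP_WAIT.  A minimal user-space sketch
of the difference (not kernel code; the constants mirror the
2.6.24-era include/linux/gfp.h, and the two helpers are illustrative):

	#include <stdio.h>

	#define __GFP_WAIT	0x10u	/* allocator may sleep, so may reclaim */
	#define __GFP_HIGH	0x20u	/* may dip into emergency reserves */
	#define __GFP_IO	0x40u
	#define __GFP_FS	0x80u

	#define GFP_NOWAIT	0u
	#define GFP_ATOMIC	(__GFP_HIGH)
	#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)

	static int may_reclaim_old(unsigned int gfp_mask)
	{
		return !(gfp_mask & GFP_ATOMIC);	/* really tests __GFP_HIGH */
	}

	static int may_reclaim_new(unsigned int gfp_mask)
	{
		return !!(gfp_mask & __GFP_WAIT);	/* "is sleeping allowed?" */
	}

	int main(void)
	{
		/* GFP_NOWAIT must not reclaim, yet the old test says it may: */
		printf("GFP_NOWAIT:            old=%d new=%d\n",
		       may_reclaim_old(GFP_NOWAIT), may_reclaim_new(GFP_NOWAIT));

		/* GFP_KERNEL|__GFP_HIGH may sleep, yet the old test bails out: */
		printf("GFP_KERNEL|__GFP_HIGH: old=%d new=%d\n",
		       may_reclaim_old(GFP_KERNEL | __GFP_HIGH),
		       may_reclaim_new(GFP_KERNEL | __GFP_HIGH));
		return 0;
	}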
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -345,23 +345,22 @@ retry:
 			goto done;
 		}
 	}
-
 	unlock_page_cgroup(page);
 
 	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
 	if (pc == NULL)
 		goto err;
 
-	rcu_read_lock();
 	/*
-	 * We always charge the cgroup the mm_struct belongs to
-	 * the mm_struct's mem_cgroup changes on task migration if the
+	 * We always charge the cgroup the mm_struct belongs to.
+	 * The mm_struct's mem_cgroup changes on task migration if the
 	 * thread group leader migrates. It's possible that mm is not
 	 * set, if so charge the init_mm (happens for pagecache usage).
 	 */
 	if (!mm)
 		mm = &init_mm;
 
+	rcu_read_lock();
 	mem = rcu_dereference(mm->mem_cgroup);
 	/*
 	 * For every charge from the cgroup, increment reference
@@ -375,12 +374,8 @@ retry:
 	 * the cgroup limit.
 	 */
 	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
-		bool is_atomic = gfp_mask & GFP_ATOMIC;
-		/*
-		 * We cannot reclaim under GFP_ATOMIC, fail the charge
-		 */
-		if (is_atomic)
-			goto noreclaim;
+		if (!(gfp_mask & __GFP_WAIT))
+			goto out;
 
 		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
 			continue;
@@ -394,23 +389,12 @@ retry:
 		 */
 		if (res_counter_check_under_limit(&mem->res))
 			continue;
-		/*
-		 * Since we control both RSS and cache, we end up with a
-		 * very interesting scenario where we end up reclaiming
-		 * memory (essentially RSS), since the memory is pushed
-		 * to swap cache, we eventually end up adding those
-		 * pages back to our list. Hence we give ourselves a
-		 * few chances before we fail
-		 */
-		else if (nr_retries--) {
-			congestion_wait(WRITE, HZ/10);
-			continue;
-		}
-noreclaim:
-		css_put(&mem->css);
-		if (!is_atomic)
-			mem_cgroup_out_of_memory(mem, GFP_KERNEL);
-		goto free_pc;
+
+		if (!nr_retries--) {
+			mem_cgroup_out_of_memory(mem, gfp_mask);
+			goto out;
+		}
+		congestion_wait(WRITE, HZ/10);
 	}
 
 	atomic_set(&pc->ref_cnt, 1);
@@ -419,10 +403,11 @@ noreclaim:
 	pc->flags = 0;
 	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
 		pc->flags |= PAGE_CGROUP_FLAG_CACHE;
+
 	if (page_cgroup_assign_new_page_cgroup(page, pc)) {
 		/*
-		 * an another charge is added to this page already.
-		 * we do take lock_page_cgroup(page) again and read
+		 * Another charge has been added to this page already.
+		 * We take lock_page_cgroup(page) again and read
 		 * page->cgroup, increment refcnt.... just retry is OK.
 		 */
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
@@ -437,7 +422,8 @@ noreclaim:
 
 done:
 	return 0;
-free_pc:
+out:
+	css_put(&mem->css);
 	kfree(pc);
 err:
 	return -ENOMEM;
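After the rewrite, the charge loop reads straight down: bail out if the
caller cannot sleep, try reclaim, re-check the limit, and declare OOM
only once the retries are exhausted, throttling between rounds.  A
distilled user-space model of that control flow (the charge, reclaim,
and limit stubs below are hypothetical stand-ins, not the kernel's
res_counter or vmscan APIs):

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_RETRIES 5

	/* Hypothetical stand-ins: the charge succeeds on the fourth attempt. */
	static bool charge_ok(void)    { static int n; return ++n > 3; }
	static bool reclaim_some(void) { return false; }
	static bool under_limit(void)  { return false; }

	static int try_charge(bool can_wait)
	{
		int nr_retries = NR_RETRIES;

		while (!charge_ok()) {
			if (!can_wait)
				return -1;	/* the "goto out" case */
			if (reclaim_some())
				continue;	/* freed something, retry */
			if (under_limit())
				continue;	/* raced with an uncharge, retry */
			if (!nr_retries--) {
				fprintf(stderr, "out of memory\n");
				return -1;	/* mem_cgroup_out_of_memory() */
			}
			/* congestion_wait(WRITE, HZ/10) would throttle here */
		}
		return 0;
	}

	int main(void)
	{
		/* succeeds on the fourth attempt, after three retry rounds */
		printf("charge: %d\n", try_charge(true));
		return 0;
	}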