radix-tree: account nodes to memcg only if explicitly requested
Radix trees may be used not only for storing page cache pages, so
unconditionally accounting radix tree nodes to the current memory cgroup
is bad: if a radix tree node is used for storing data shared among
different cgroups we risk pinning dead memory cgroups forever.
So let's only account radix tree nodes if it was explicitly requested by
passing __GFP_ACCOUNT to INIT_RADIX_TREE. Currently, we only want to
account page cache entries, so mark mapping->page_tree so.
Fixes: 58e698af4c ("radix-tree: account radix_tree_node to memory cgroup")
Link: http://lkml.kernel.org/r/1470057188-7864-1-git-send-email-vdavydov@virtuozzo.com
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: <stable@vger.kernel.org> [4.6+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent: c3cee37228
commit: 05eb6e7263
@@ -345,7 +345,7 @@ EXPORT_SYMBOL(inc_nlink);
 void address_space_init_once(struct address_space *mapping)
 {
 	memset(mapping, 0, sizeof(*mapping));
-	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
 	spin_lock_init(&mapping->tree_lock);
 	init_rwsem(&mapping->i_mmap_rwsem);
 	INIT_LIST_HEAD(&mapping->private_list);
@@ -277,10 +277,11 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 
 		/*
 		 * Even if the caller has preloaded, try to allocate from the
-		 * cache first for the new node to get accounted.
+		 * cache first for the new node to get accounted to the memory
+		 * cgroup.
 		 */
 		ret = kmem_cache_alloc(radix_tree_node_cachep,
-				       gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
+				       gfp_mask | __GFP_NOWARN);
 		if (ret)
 			goto out;
 
@@ -303,8 +304,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		kmemleak_update_trace(ret);
 		goto out;
 	}
-	ret = kmem_cache_alloc(radix_tree_node_cachep,
-			       gfp_mask | __GFP_ACCOUNT);
+	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 out:
 	BUG_ON(radix_tree_is_internal_node(ret));
 	return ret;
@@ -351,6 +351,12 @@ static int __radix_tree_preload(gfp_t gfp_mask, int nr)
 	struct radix_tree_node *node;
 	int ret = -ENOMEM;
 
+	/*
+	 * Nodes preloaded by one cgroup can be be used by another cgroup, so
+	 * they should never be accounted to any particular memory cgroup.
+	 */
+	gfp_mask &= ~__GFP_ACCOUNT;
+
 	preempt_disable();
 	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < nr) {
Loading…
Reference in New Issue