memcg, slab: do not destroy children caches if parent has aliases
Currently we destroy children caches at the very beginning of
kmem_cache_destroy(). This is wrong, because the root cache will not
necessarily be destroyed in the end - if it has aliases (refcount > 0),
kmem_cache_destroy() will simply decrement its refcount and return. In
this case, at best we will get a bunch of warnings in dmesg, like this
one:

  kmem_cache_destroy kmalloc-32:0: Slab cache still has objects
  CPU: 1 PID: 7139 Comm: modprobe Tainted: G B W 3.13.0+ #117
  Call Trace:
    dump_stack+0x49/0x5b
    kmem_cache_destroy+0xdf/0xf0
    kmem_cache_destroy_memcg_children+0x97/0xc0
    kmem_cache_destroy+0xf/0xf0
    xfs_mru_cache_uninit+0x21/0x30 [xfs]
    exit_xfs_fs+0x2e/0xc44 [xfs]
    SyS_delete_module+0x198/0x1f0
    system_call_fastpath+0x16/0x1b

At worst - if kmem_cache_destroy() races with an allocation from a
memcg cache - the kernel will panic.

This patch fixes the problem by moving the destruction of children
caches after the alias check. In addition, it forbids destroying a root
cache while it still has children, because each child cache keeps a
reference to its parent.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Glauber Costa <glommer@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit b8529907ba
parent 051dd46050
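To make the ordering bug concrete, here is a deliberately simplified userspace model of the logic described in the commit message (illustration only, not kernel code: "struct cache", destroy_old() and destroy_new() are invented names). With one alias holding the root cache (refcount == 2), the old ordering throws the per-memcg children away even though the root survives; the new ordering leaves them alone until the root really goes away.

/*
 * Illustration only -- not kernel code.  "struct cache", destroy_old()
 * and destroy_new() are invented for this sketch; they merely model the
 * refcount/alias ordering discussed in the commit message.
 */
#include <stdio.h>

struct cache {
	const char *name;
	int refcount;		/* > 1 means the root cache has aliases */
	int nr_children;	/* per-memcg children hanging off the root */
};

/* Old ordering: children are torn down before the alias check. */
static void destroy_old(struct cache *s)
{
	s->nr_children = 0;	/* children destroyed unconditionally */
	if (--s->refcount)
		return;		/* an alias keeps the root alive, but its
				 * children are already gone */
	printf("%s: root destroyed\n", s->name);
}

/* New ordering: bail out on aliases first, touch the children only when
 * the root cache is really going away. */
static void destroy_new(struct cache *s)
{
	if (--s->refcount)
		return;		/* root and its children stay intact */
	s->nr_children = 0;
	printf("%s: root destroyed\n", s->name);
}

int main(void)
{
	struct cache a = { "kmalloc-32 (old)", 2, 1 };
	struct cache b = { "kmalloc-32 (new)", 2, 1 };

	destroy_old(&a);
	destroy_new(&b);
	printf("old ordering: refcount=%d children=%d\n", a.refcount, a.nr_children);
	printf("new ordering: refcount=%d children=%d\n", b.refcount, b.nr_children);
	return 0;
}

Run as-is, neither root cache is destroyed (an alias still references it), but only the old ordering has already lost its children.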
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -507,7 +507,7 @@ struct kmem_cache *
 __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
 
 void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
-void kmem_cache_destroy_memcg_children(struct kmem_cache *s);
+int __kmem_cache_destroy_memcg_children(struct kmem_cache *s);
 
 /**
  * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
@@ -661,10 +661,6 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	return cachep;
 }
-
-static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
-{
-}
 #endif /* CONFIG_MEMCG_KMEM */
 #endif /* _LINUX_MEMCONTROL_H */
 
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3321,15 +3321,10 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
 		schedule_work(&cachep->memcg_params->destroy);
 }
 
-void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+int __kmem_cache_destroy_memcg_children(struct kmem_cache *s)
 {
 	struct kmem_cache *c;
-	int i;
-
-	if (!s->memcg_params)
-		return;
-	if (!s->memcg_params->is_root_cache)
-		return;
+	int i, failed = 0;
 
 	/*
 	 * If the cache is being destroyed, we trust that there is no one else
@@ -3363,8 +3358,12 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
 		c->memcg_params->dead = false;
 		cancel_work_sync(&c->memcg_params->destroy);
 		kmem_cache_destroy(c);
+
+		if (cache_from_memcg_idx(s, i))
+			failed++;
 	}
 	mutex_unlock(&activate_kmem_mutex);
+	return failed;
 }
 
 static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -301,39 +301,64 @@ out_unlock:
 	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 }
+
+static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+	int rc;
+
+	if (!s->memcg_params ||
+	    !s->memcg_params->is_root_cache)
+		return 0;
+
+	mutex_unlock(&slab_mutex);
+	rc = __kmem_cache_destroy_memcg_children(s);
+	mutex_lock(&slab_mutex);
+
+	return rc;
+}
+#else
+static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+	return 0;
+}
 #endif /* CONFIG_MEMCG_KMEM */
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	/* Destroy all the children caches if we aren't a memcg cache */
-	kmem_cache_destroy_memcg_children(s);
-
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
+
 	s->refcount--;
-	if (!s->refcount) {
-		list_del(&s->list);
-		memcg_unregister_cache(s);
+	if (s->refcount)
+		goto out_unlock;
+
+	if (kmem_cache_destroy_memcg_children(s) != 0)
+		goto out_unlock;
 
-		if (!__kmem_cache_shutdown(s)) {
-			mutex_unlock(&slab_mutex);
-			if (s->flags & SLAB_DESTROY_BY_RCU)
-				rcu_barrier();
+	list_del(&s->list);
+	memcg_unregister_cache(s);
 
-			memcg_free_cache_params(s);
-			kfree(s->name);
-			kmem_cache_free(kmem_cache, s);
-		} else {
-			list_add(&s->list, &slab_caches);
-			memcg_register_cache(s);
-			mutex_unlock(&slab_mutex);
-			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
-			       s->name);
-			dump_stack();
-		}
-	} else {
-		mutex_unlock(&slab_mutex);
+	if (__kmem_cache_shutdown(s) != 0) {
+		list_add(&s->list, &slab_caches);
+		memcg_register_cache(s);
+		printk(KERN_ERR "kmem_cache_destroy %s: "
+		       "Slab cache still has objects\n", s->name);
+		dump_stack();
+		goto out_unlock;
 	}
+
+	mutex_unlock(&slab_mutex);
+	if (s->flags & SLAB_DESTROY_BY_RCU)
+		rcu_barrier();
+
+	memcg_free_cache_params(s);
+	kfree(s->name);
+	kmem_cache_free(kmem_cache, s);
+	goto out_put_cpus;
+
+out_unlock:
+	mutex_unlock(&slab_mutex);
+out_put_cpus:
 	put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
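The mm/slab_common.c hunk also changes the helper's contract: __kmem_cache_destroy_memcg_children() now returns the number of per-memcg children that are still registered after their kmem_cache_destroy() attempt (checked via cache_from_memcg_idx()), and kmem_cache_destroy() keeps the root cache alive whenever that number is non-zero. A minimal userspace sketch of that counting contract, assuming nothing beyond what the hunk shows ("struct child" and cleanup_children() are invented names, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for one per-memcg child cache slot. */
struct child {
	bool present;	/* still registered, like cache_from_memcg_idx() != NULL */
	bool busy;	/* still has objects, so its destruction will fail */
};

/* Models __kmem_cache_destroy_memcg_children(): try to destroy every
 * child, then count the ones that survived the attempt. */
static int cleanup_children(struct child *c, int n)
{
	int i, failed = 0;

	for (i = 0; i < n; i++) {
		if (!c[i].present)
			continue;
		if (!c[i].busy)
			c[i].present = false;	/* destruction succeeded */
		if (c[i].present)
			failed++;		/* child refused to die */
	}
	return failed;
}

int main(void)
{
	struct child children[] = { { true, false }, { true, true } };
	int failed = cleanup_children(children, 2);

	/* kmem_cache_destroy() does "goto out_unlock" when failed != 0. */
	printf("%d child(ren) still alive -> %s the root cache\n",
	       failed, failed ? "keep" : "destroy");
	return 0;
}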