mm/sl[aou]b: Do slab aliasing call from common code

The slab aliasing logic causes some strange contortions in slub. So add
a call to deal with aliases to slab_common.c but disable it for other
slab allocators by providing stubs that fail to create aliases.

Full general support for aliases will require additional cleanup passes
and more standardization of fields in kmem_cache.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
This commit is contained in:
Christoph Lameter 2012-09-05 00:18:32 +00:00 committed by Pekka Enberg
parent db265eca77
commit cbb79694d5
3 changed files with 25 additions and 4 deletions

View File

@@ -36,6 +36,16 @@ extern struct kmem_cache *kmem_cache;
struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *));
#ifdef CONFIG_SLUB
struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif
int __kmem_cache_shutdown(struct kmem_cache *);
#endif

View File

@@ -115,6 +115,10 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
		goto out_locked;
	}
s = __kmem_cache_alias(name, size, align, flags, ctor);
if (s)
goto out_locked;
	s = __kmem_cache_create(n, size, align, flags, ctor);
	if (s) {

View File

@@ -3708,7 +3708,7 @@ void __init kmem_cache_init(void)
	slub_max_order = 0;
	kmem_size = offsetof(struct kmem_cache, node) +
			nr_node_ids * sizeof(struct kmem_cache_node *);
	/* Allocate two kmem_caches from the page allocator */
	kmalloc_size = ALIGN(kmem_size, cache_line_size());
@@ -3922,7 +3922,7 @@ static struct kmem_cache *find_mergeable(size_t size,
	return NULL;
}
struct kmem_cache *__kmem_cache_create(const char *name, size_t size, struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
		size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;
@@ -3939,11 +3939,18 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
		if (sysfs_slab_alias(s, name)) {
			s->refcount--;
return NULL; s = NULL;
		}
return s;
} }
return s;
}
struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *s;
	s = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
	if (s) {
		if (kmem_cache_open(s, name,