Merge branch 'slab/common-for-cgroups' into slab/for-linus

Fix up a trivial conflict with NUMA_NO_NODE cleanups.

Conflicts:
	mm/slob.c

Signed-off-by: Pekka Enberg <penberg@kernel.org>
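In effect, the merged series hoists the allocator-independent half of cache creation and destruction into mm/slab_common.c: kmem_cache_create() itself now allocates and fills the struct kmem_cache, and each allocator (slab, slob, slub) only finishes the job through the new internal hook __kmem_cache_create(), which takes the preallocated cache and returns an errno. A condensed sketch of the resulting flow, abridged from the mm/slab_common.c hunk below (locking and error paths shortened):

	/* Abridged from the new kmem_cache_create() in mm/slab_common.c below. */
	struct kmem_cache *kmem_cache_create(const char *name, size_t size,
					     size_t align, unsigned long flags,
					     void (*ctor)(void *))
	{
		/* common part: the descriptor comes from kmem_cache itself */
		struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);

		s->object_size = s->size = size;
		s->align = align;
		s->ctor = ctor;
		s->name = kstrdup(name, GFP_KERNEL);

		/* allocator-specific part: 0 on success, -errno on failure */
		if (__kmem_cache_create(s, flags) == 0) {
			s->refcount = 1;
			list_add(&s->list, &slab_caches);
			return s;
		}

		kfree(s->name);
		kmem_cache_free(kmem_cache, s);
		return NULL;
	}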
commit f4178cdddd
 mm/slab.c | 255

--- a/mm/slab.c
+++ b/mm/slab.c
@@ -570,9 +570,9 @@ static struct arraycache_init initarray_generic =
     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
 
 /* internal cache of cache description objs */
-static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
-static struct kmem_cache cache_cache = {
-	.nodelists = cache_cache_nodelists,
+static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
+static struct kmem_cache kmem_cache_boot = {
+	.nodelists = kmem_cache_nodelists,
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
@@ -795,6 +795,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
 
+#if DEBUG
 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 
 static void __slab_error(const char *function, struct kmem_cache *cachep,
@@ -805,6 +806,7 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
 	dump_stack();
 	add_taint(TAINT_BAD_PAGE);
 }
+#endif
 
 /*
  * By default on NUMA we use alien caches to stage the freeing of
@@ -1587,15 +1589,17 @@ void __init kmem_cache_init(void)
 	int order;
 	int node;
 
+	kmem_cache = &kmem_cache_boot;
+
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
 		if (i < MAX_NUMNODES)
-			cache_cache.nodelists[i] = NULL;
+			kmem_cache->nodelists[i] = NULL;
 	}
-	set_up_list3s(&cache_cache, CACHE_CACHE);
+	set_up_list3s(kmem_cache, CACHE_CACHE);
 
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
@@ -1607,9 +1611,9 @@ void __init kmem_cache_init(void)
 
 	/* Bootstrap is tricky, because several objects are allocated
 	 * from caches that do not exist yet:
-	 * 1) initialize the cache_cache cache: it contains the struct
-	 *    kmem_cache structures of all caches, except cache_cache itself:
-	 *    cache_cache is statically allocated.
+	 * 1) initialize the kmem_cache cache: it contains the struct
+	 *    kmem_cache structures of all caches, except kmem_cache itself:
+	 *    kmem_cache is statically allocated.
 	 *    Initially an __init data area is used for the head array and the
 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
 	 *    array at the end of the bootstrap.
@@ -1618,43 +1622,43 @@ void __init kmem_cache_init(void)
 	 *    An __init data area is used for the head array.
 	 * 3) Create the remaining kmalloc caches, with minimally sized
 	 *    head arrays.
-	 * 4) Replace the __init data head arrays for cache_cache and the first
+	 * 4) Replace the __init data head arrays for kmem_cache and the first
 	 *    kmalloc cache with kmalloc allocated arrays.
-	 * 5) Replace the __init data for kmem_list3 for cache_cache and
+	 * 5) Replace the __init data for kmem_list3 for kmem_cache and
 	 *    the other cache's with kmalloc allocated memory.
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
 	node = numa_mem_id();
 
-	/* 1) create the cache_cache */
+	/* 1) create the kmem_cache */
 	INIT_LIST_HEAD(&slab_caches);
-	list_add(&cache_cache.list, &slab_caches);
-	cache_cache.colour_off = cache_line_size();
-	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
+	list_add(&kmem_cache->list, &slab_caches);
+	kmem_cache->colour_off = cache_line_size();
+	kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
+	kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
 	 */
-	cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+	kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
 				  nr_node_ids * sizeof(struct kmem_list3 *);
-	cache_cache.object_size = cache_cache.size;
-	cache_cache.size = ALIGN(cache_cache.size,
+	kmem_cache->object_size = kmem_cache->size;
+	kmem_cache->size = ALIGN(kmem_cache->object_size,
 					cache_line_size());
-	cache_cache.reciprocal_buffer_size =
-		reciprocal_value(cache_cache.size);
+	kmem_cache->reciprocal_buffer_size =
+		reciprocal_value(kmem_cache->size);
 
 	for (order = 0; order < MAX_ORDER; order++) {
-		cache_estimate(order, cache_cache.size,
-			cache_line_size(), 0, &left_over, &cache_cache.num);
-		if (cache_cache.num)
+		cache_estimate(order, kmem_cache->size,
+			cache_line_size(), 0, &left_over, &kmem_cache->num);
+		if (kmem_cache->num)
 			break;
 	}
-	BUG_ON(!cache_cache.num);
-	cache_cache.gfporder = order;
-	cache_cache.colour = left_over / cache_cache.colour_off;
-	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+	BUG_ON(!kmem_cache->num);
+	kmem_cache->gfporder = order;
+	kmem_cache->colour = left_over / kmem_cache->colour_off;
+	kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
 				      sizeof(struct slab), cache_line_size());
 
 	/* 2+3) create the kmalloc caches */
@@ -1667,19 +1671,22 @@ void __init kmem_cache_init(void)
 	 * bug.
 	 */
 
-	sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
-					sizes[INDEX_AC].cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
+	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
+	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
 
 	if (INDEX_AC != INDEX_L3) {
-		sizes[INDEX_L3].cs_cachep =
-			__kmem_cache_create(names[INDEX_L3].name,
-				sizes[INDEX_L3].cs_size,
-				ARCH_KMALLOC_MINALIGN,
-				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-				NULL);
+		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
+		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
 	}
 
 	slab_early_init = 0;
@@ -1693,20 +1700,23 @@ void __init kmem_cache_init(void)
 		 * allow tighter packing of the smaller caches.
 		 */
 		if (!sizes->cs_cachep) {
-			sizes->cs_cachep = __kmem_cache_create(names->name,
-					sizes->cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
+			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+			sizes->cs_cachep->name = names->name;
+			sizes->cs_cachep->size = sizes->cs_size;
+			sizes->cs_cachep->object_size = sizes->cs_size;
+			sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+			__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
+			list_add(&sizes->cs_cachep->list, &slab_caches);
 		}
 #ifdef CONFIG_ZONE_DMA
-		sizes->cs_dmacachep = __kmem_cache_create(
-					names->name_dma,
-					sizes->cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
-						SLAB_PANIC,
-					NULL);
+		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		sizes->cs_dmacachep->name = names->name_dma;
+		sizes->cs_dmacachep->size = sizes->cs_size;
+		sizes->cs_dmacachep->object_size = sizes->cs_size;
+		sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
+		__kmem_cache_create(sizes->cs_dmacachep,
+				    ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
+		list_add(&sizes->cs_dmacachep->list, &slab_caches);
 #endif
 		sizes++;
 		names++;
@@ -1717,15 +1727,15 @@ void __init kmem_cache_init(void)
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
-		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
-		memcpy(ptr, cpu_cache_get(&cache_cache),
+		BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
+		memcpy(ptr, cpu_cache_get(kmem_cache),
 		       sizeof(struct arraycache_init));
 		/*
 		 * Do not assume that spinlocks can be initialized via memcpy:
 		 */
 		spin_lock_init(&ptr->lock);
 
-		cache_cache.array[smp_processor_id()] = ptr;
+		kmem_cache->array[smp_processor_id()] = ptr;
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
 
@@ -1746,7 +1756,7 @@ void __init kmem_cache_init(void)
 		int nid;
 
 		for_each_online_node(nid) {
-			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
+			init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
@@ -2195,27 +2205,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 
-static void __kmem_cache_destroy(struct kmem_cache *cachep)
-{
-	int i;
-	struct kmem_list3 *l3;
-
-	for_each_online_cpu(i)
-		kfree(cachep->array[i]);
-
-	/* NUMA: free the list3 structures */
-	for_each_online_node(i) {
-		l3 = cachep->nodelists[i];
-		if (l3) {
-			kfree(l3->shared);
-			free_alien_cache(l3->alien);
-			kfree(l3);
-		}
-	}
-	kmem_cache_free(&cache_cache, cachep);
-}
-
-
 /**
  * calculate_slab_order - calculate size (page order) of slabs
  * @cachep: pointer to the cache that is being created
@@ -2352,9 +2341,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * Cannot be called within a int, but can be interrupted.
  * The @ctor is run when new pages are allocated by the cache.
  *
- * @name must be valid until the cache is destroyed. This implies that
- * the module calling this has to destroy the cache before getting unloaded.
- *
  * The flags are
  *
  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -2367,13 +2353,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
-struct kmem_cache *
-__kmem_cache_create (const char *name, size_t size, size_t align,
-	unsigned long flags, void (*ctor)(void *))
+int
+__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
 	size_t left_over, slab_size, ralign;
-	struct kmem_cache *cachep = NULL;
 	gfp_t gfp;
+	int err;
+	size_t size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
@@ -2445,8 +2431,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		ralign = ARCH_SLAB_MINALIGN;
 	}
 	/* 3) caller mandated alignment */
-	if (ralign < align) {
-		ralign = align;
+	if (ralign < cachep->align) {
+		ralign = cachep->align;
 	}
 	/* disable debug if necessary */
 	if (ralign > __alignof__(unsigned long long))
@@ -2454,21 +2440,14 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	/*
 	 * 4) Store it.
 	 */
-	align = ralign;
+	cachep->align = ralign;
 
 	if (slab_is_available())
 		gfp = GFP_KERNEL;
 	else
 		gfp = GFP_NOWAIT;
 
-	/* Get cache's description obj. */
-	cachep = kmem_cache_zalloc(&cache_cache, gfp);
-	if (!cachep)
-		return NULL;
-
 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
-	cachep->object_size = size;
-	cachep->align = align;
 #if DEBUG
 
 	/*
@@ -2514,18 +2493,15 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 		flags |= CFLGS_OFF_SLAB;
 
-	size = ALIGN(size, align);
+	size = ALIGN(size, cachep->align);
 
-	left_over = calculate_slab_order(cachep, size, align, flags);
+	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
 
-	if (!cachep->num) {
-		printk(KERN_ERR
-		       "kmem_cache_create: couldn't create cache %s.\n", name);
-		kmem_cache_free(&cache_cache, cachep);
-		return NULL;
-	}
+	if (!cachep->num)
+		return -E2BIG;
+
 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
-			  + sizeof(struct slab), align);
+			  + sizeof(struct slab), cachep->align);
 
 	/*
 	 * If the slab has been placed off-slab, and we have enough space then
@@ -2553,8 +2529,8 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 
 	cachep->colour_off = cache_line_size();
 	/* Offset must be a multiple of the alignment. */
-	if (cachep->colour_off < align)
-		cachep->colour_off = align;
+	if (cachep->colour_off < cachep->align)
+		cachep->colour_off = cachep->align;
 	cachep->colour = left_over / cachep->colour_off;
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
@@ -2575,12 +2551,11 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		 */
 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
-	cachep->ctor = ctor;
-	cachep->name = name;
 
-	if (setup_cpu_cache(cachep, gfp)) {
-		__kmem_cache_destroy(cachep);
-		return NULL;
+	err = setup_cpu_cache(cachep, gfp);
+	if (err) {
+		__kmem_cache_shutdown(cachep);
+		return err;
 	}
 
 	if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2593,9 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 		slab_set_debugobj_lock_classes(cachep);
 	}
 
-	/* cache setup completed, link it into the list */
-	list_add(&cachep->list, &slab_caches);
-	return cachep;
+	return 0;
 }
 
 #if DEBUG
@@ -2754,49 +2727,29 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-/**
- * kmem_cache_destroy - delete a cache
- * @cachep: the cache to destroy
- *
- * Remove a &struct kmem_cache object from the slab cache.
- *
- * It is expected this function will be called by a module when it is
- * unloaded. This will remove the cache completely, and avoid a duplicate
- * cache being allocated each time a module is loaded and unloaded, if the
- * module doesn't have persistent in-kernel storage across loads and unloads.
- *
- * The cache must be empty before calling this function.
- *
- * The caller must guarantee that no one will allocate memory from the cache
- * during the kmem_cache_destroy().
- */
-void kmem_cache_destroy(struct kmem_cache *cachep)
+int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	BUG_ON(!cachep || in_interrupt());
+	int i;
+	struct kmem_list3 *l3;
+	int rc = __cache_shrink(cachep);
 
-	/* Find the cache in the chain of caches. */
-	get_online_cpus();
-	mutex_lock(&slab_mutex);
-	/*
-	 * the chain is never empty, cache_cache is never destroyed
-	 */
-	list_del(&cachep->list);
-	if (__cache_shrink(cachep)) {
-		slab_error(cachep, "Can't free all objects");
-		list_add(&cachep->list, &slab_caches);
-		mutex_unlock(&slab_mutex);
-		put_online_cpus();
-		return;
+	if (rc)
+		return rc;
+
+	for_each_online_cpu(i)
+	    kfree(cachep->array[i]);
+
+	/* NUMA: free the list3 structures */
+	for_each_online_node(i) {
+		l3 = cachep->nodelists[i];
+		if (l3) {
+			kfree(l3->shared);
+			free_alien_cache(l3->alien);
+			kfree(l3);
+		}
 	}
-
-	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-		rcu_barrier();
-
-	__kmem_cache_destroy(cachep);
-	mutex_unlock(&slab_mutex);
-	put_online_cpus();
+	return 0;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /*
  * Get the memory for a slab management obj.
@@ -3330,7 +3283,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 
 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
-	if (cachep == &cache_cache)
+	if (cachep == kmem_cache)
 		return false;
 
 	return should_failslab(cachep->object_size, flags, cachep->flags);

 mm/slab.h | 19

--- a/mm/slab.h
+++ b/mm/slab.h
@@ -25,9 +25,26 @@ extern enum slab_state slab_state;
 
 /* The slab cache mutex protects the management structures during changes */
 extern struct mutex slab_mutex;
 
+/* The list of all slab caches on the system */
 extern struct list_head slab_caches;
+
+/* The slab cache that manages slab cache information */
+extern struct kmem_cache *kmem_cache;
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
-	size_t align, unsigned long flags, void (*ctor)(void *));
+/* Functions provided by the slab allocators */
+extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
+
+#ifdef CONFIG_SLUB
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+	size_t align, unsigned long flags, void (*ctor)(void *));
+#else
+static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+	size_t align, unsigned long flags, void (*ctor)(void *))
+{ return NULL; }
+#endif
+
+
+int __kmem_cache_shutdown(struct kmem_cache *);
 
 #endif
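The mm/slab.h hunk above is the heart of the refactoring: it pins down what every allocator must now export to the common code. As an illustration only (not code from this commit), a backend conforming to that contract needs little more than the following; the slob implementation later in this diff is essentially this minimal form:

	/* Illustrative minimal backend under the new mm/slab.h contract. */
	int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
	{
		/* finish allocator-private setup; common code has already
		 * filled c->name, c->size, c->align and c->ctor */
		c->flags = flags;
		return 0;	/* non-zero aborts kmem_cache_create() */
	}

	int __kmem_cache_shutdown(struct kmem_cache *c)
	{
		/* tear down allocator-private state; non-zero means objects
		 * are still live and destruction must be refused */
		return 0;
	}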
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -22,6 +22,7 @@
 enum slab_state slab_state;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
+struct kmem_cache *kmem_cache;
 
 #ifdef CONFIG_DEBUG_VM
 static int kmem_cache_sanity_check(const char *name, size_t size)
@@ -98,21 +99,92 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 		unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s = NULL;
+	int err = 0;
 
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
-	if (kmem_cache_sanity_check(name, size) == 0)
-		s = __kmem_cache_create(name, size, align, flags, ctor);
+
+	if (!kmem_cache_sanity_check(name, size) == 0)
+		goto out_locked;
+
+	s = __kmem_cache_alias(name, size, align, flags, ctor);
+	if (s)
+		goto out_locked;
+
+	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
+	if (s) {
+		s->object_size = s->size = size;
+		s->align = align;
+		s->ctor = ctor;
+		s->name = kstrdup(name, GFP_KERNEL);
+		if (!s->name) {
+			kmem_cache_free(kmem_cache, s);
+			err = -ENOMEM;
+			goto out_locked;
+		}
+
+		err = __kmem_cache_create(s, flags);
+		if (!err) {
+
+			s->refcount = 1;
+			list_add(&s->list, &slab_caches);
+
+		} else {
+			kfree(s->name);
+			kmem_cache_free(kmem_cache, s);
+		}
+	} else
+		err = -ENOMEM;
+
+out_locked:
 	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 
-	if (!s && (flags & SLAB_PANIC))
-		panic("kmem_cache_create: Failed to create slab '%s'\n", name);
+	if (err) {
+
+		if (flags & SLAB_PANIC)
+			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
+				name, err);
+		else {
+			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
+				name, err);
+			dump_stack();
+		}
+
+		return NULL;
+	}
 
 	return s;
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
+void kmem_cache_destroy(struct kmem_cache *s)
+{
+	get_online_cpus();
+	mutex_lock(&slab_mutex);
+	s->refcount--;
+	if (!s->refcount) {
+		list_del(&s->list);
+
+		if (!__kmem_cache_shutdown(s)) {
+			if (s->flags & SLAB_DESTROY_BY_RCU)
+				rcu_barrier();
+
+			kfree(s->name);
+			kmem_cache_free(kmem_cache, s);
+		} else {
+			list_add(&s->list, &slab_caches);
+			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
+				s->name);
+			dump_stack();
+		}
+	}
+	mutex_unlock(&slab_mutex);
+	put_online_cpus();
+}
+EXPORT_SYMBOL(kmem_cache_destroy);
+
 int slab_is_available(void)
 {
 	return slab_state >= UP;

 mm/slob.c | 60

--- a/mm/slob.c
+++ b/mm/slob.c
@@ -529,44 +529,24 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
-	size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-	struct kmem_cache *c;
+	size_t align = c->size;
 
-	c = slob_alloc(sizeof(struct kmem_cache),
-		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, NUMA_NO_NODE);
-
-	if (c) {
-		c->name = name;
-		c->size = size;
-		if (flags & SLAB_DESTROY_BY_RCU) {
-			/* leave room for rcu footer at the end of object */
-			c->size += sizeof(struct slob_rcu);
-		}
-		c->flags = flags;
-		c->ctor = ctor;
-		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
-		if (c->align < ARCH_SLAB_MINALIGN)
-			c->align = ARCH_SLAB_MINALIGN;
-		if (c->align < align)
-			c->align = align;
-
-		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
-		c->refcount = 1;
+	if (flags & SLAB_DESTROY_BY_RCU) {
+		/* leave room for rcu footer at the end of object */
+		c->size += sizeof(struct slob_rcu);
 	}
-	return c;
-}
+	c->flags = flags;
+	/* ignore alignment unless it's forced */
+	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+	if (c->align < ARCH_SLAB_MINALIGN)
+		c->align = ARCH_SLAB_MINALIGN;
+	if (c->align < align)
+		c->align = align;
 
-void kmem_cache_destroy(struct kmem_cache *c)
-{
-	kmemleak_free(c);
-	if (c->flags & SLAB_DESTROY_BY_RCU)
-		rcu_barrier();
-	slob_free(c, sizeof(struct kmem_cache));
+	return 0;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
@@ -634,14 +614,28 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
+int __kmem_cache_shutdown(struct kmem_cache *c)
+{
+	/* No way to check for remaining objects */
+	return 0;
+}
+
 int kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
+struct kmem_cache kmem_cache_boot = {
+	.name = "kmem_cache",
+	.size = sizeof(struct kmem_cache),
+	.flags = SLAB_PANIC,
+	.align = ARCH_KMALLOC_MINALIGN,
+};
+
 void __init kmem_cache_init(void)
 {
+	kmem_cache = &kmem_cache_boot;
 	slab_state = UP;
 }

 mm/slub.c | 145

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -210,11 +210,7 @@ static void sysfs_slab_remove(struct kmem_cache *);
 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 							{ return 0; }
-static inline void sysfs_slab_remove(struct kmem_cache *s)
-{
-	kfree(s->name);
-	kfree(s);
-}
+static inline void sysfs_slab_remove(struct kmem_cache *s) { }
 
 #endif
 
@@ -626,7 +622,7 @@ static void object_err(struct kmem_cache *s, struct page *page,
 	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
@@ -2627,6 +2623,13 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_head_page(x);
 
+	if (kmem_cache_debug(s) && page->slab != s) {
+		pr_err("kmem_cache_free: Wrong slab cache. %s but object"
+			" is from %s\n", page->slab->name, s->name);
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	slab_free(s, page, x, _RET_IP_);
 
 	trace_kmem_cache_free(_RET_IP_, x);
@@ -3041,17 +3044,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
 }
 
-static int kmem_cache_open(struct kmem_cache *s,
-		const char *name, size_t size,
-		size_t align, unsigned long flags,
-		void (*ctor)(void *))
+static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 {
-	memset(s, 0, kmem_size);
-	s->name = name;
-	s->ctor = ctor;
-	s->object_size = size;
-	s->align = align;
-	s->flags = kmem_cache_flags(size, flags, name, ctor);
+	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
 	s->reserved = 0;
 
 	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
@@ -3113,7 +3108,6 @@ static int kmem_cache_open(struct kmem_cache *s,
 	else
 		s->cpu_partial = 30;
 
-	s->refcount = 1;
 #ifdef CONFIG_NUMA
 	s->remote_node_defrag_ratio = 1000;
 #endif
@@ -3121,16 +3115,16 @@ static int kmem_cache_open(struct kmem_cache *s,
 		goto error;
 
 	if (alloc_kmem_cache_cpus(s))
-		return 1;
+		return 0;
 
 	free_kmem_cache_nodes(s);
 error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)size, s->size, oo_order(s->oo),
+			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
 			s->offset, flags);
-	return 0;
+	return -EINVAL;
 }
 
 /*
@@ -3152,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 						sizeof(long), GFP_ATOMIC);
 	if (!map)
 		return;
-	slab_err(s, page, "%s", text);
+	slab_err(s, page, text, s->name);
 	slab_lock(page);
 
 	get_map(s, page, map);
@@ -3184,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
-				"Objects remaining on kmem_cache_close()");
+				"Objects remaining in %s on kmem_cache_close()");
 		}
 	}
 }
@@ -3197,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 	int node;
 
 	flush_all(s);
-	free_percpu(s->cpu_slab);
 	/* Attempt to free all objects */
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
@@ -3206,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
+	free_percpu(s->cpu_slab);
 	free_kmem_cache_nodes(s);
 	return 0;
 }
 
-/*
- * Close a cache and release the kmem_cache structure
- * (must be used for caches created using kmem_cache_create)
- */
-void kmem_cache_destroy(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
-	mutex_lock(&slab_mutex);
-	s->refcount--;
-	if (!s->refcount) {
-		list_del(&s->list);
-		mutex_unlock(&slab_mutex);
-		if (kmem_cache_close(s)) {
-			printk(KERN_ERR "SLUB %s: %s called for cache that "
-				"still has objects.\n", s->name, __func__);
-			dump_stack();
-		}
-		if (s->flags & SLAB_DESTROY_BY_RCU)
-			rcu_barrier();
+	int rc = kmem_cache_close(s);
+
+	if (!rc)
 		sysfs_slab_remove(s);
-	} else
-		mutex_unlock(&slab_mutex);
+
+	return rc;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /********************************************************************
  *		Kmalloc subsystem
|
@ -3241,8 +3221,6 @@ EXPORT_SYMBOL(kmem_cache_destroy);
|
||||||
struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
|
struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
|
||||||
EXPORT_SYMBOL(kmalloc_caches);
|
EXPORT_SYMBOL(kmalloc_caches);
|
||||||
|
|
||||||
static struct kmem_cache *kmem_cache;
|
|
||||||
|
|
||||||
#ifdef CONFIG_ZONE_DMA
|
#ifdef CONFIG_ZONE_DMA
|
||||||
static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
|
static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
|
||||||
#endif
|
#endif
|
||||||
|
@@ -3288,14 +3266,17 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
 {
 	struct kmem_cache *s;
 
-	s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
+	s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 
+	s->name = name;
+	s->size = s->object_size = size;
+	s->align = ARCH_KMALLOC_MINALIGN;
+
 	/*
 	 * This function is called with IRQs disabled during early-boot on
 	 * single CPU so there's no need to take slab_mutex here.
 	 */
-	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
-								flags, NULL))
+	if (kmem_cache_open(s, flags))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -3734,12 +3715,12 @@ void __init kmem_cache_init(void)
 		slub_max_order = 0;
 
 	kmem_size = offsetof(struct kmem_cache, node) +
 				nr_node_ids * sizeof(struct kmem_cache_node *);
 
 	/* Allocate two kmem_caches from the page allocator */
 	kmalloc_size = ALIGN(kmem_size, cache_line_size());
 	order = get_order(2 * kmalloc_size);
-	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
+	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);
 
 	/*
 	 * Must first have the slab cache available for the allocations of the
@@ -3748,9 +3729,10 @@ void __init kmem_cache_init(void)
 	 */
 	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
 
-	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
-		sizeof(struct kmem_cache_node),
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache_node->name = "kmem_cache_node";
+	kmem_cache_node->size = kmem_cache_node->object_size =
+		sizeof(struct kmem_cache_node);
+	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 
@@ -3758,8 +3740,10 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL;
 
 	temp_kmem_cache = kmem_cache;
-	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache->name = "kmem_cache";
+	kmem_cache->size = kmem_cache->object_size = kmem_size;
+	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+
 	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
 	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
@@ -3948,11 +3932,10 @@ static struct kmem_cache *find_mergeable(size_t size,
 	return NULL;
 }
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 		size_t align, unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s;
-	char *n;
 
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
@@ -3966,36 +3949,29 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 
 		if (sysfs_slab_alias(s, name)) {
 			s->refcount--;
-			return NULL;
+			s = NULL;
 		}
-		return s;
 	}
 
-	n = kstrdup(name, GFP_KERNEL);
-	if (!n)
-		return NULL;
+	return s;
+}
 
-	s = kmalloc(kmem_size, GFP_KERNEL);
-	if (s) {
-		if (kmem_cache_open(s, n,
-				size, align, flags, ctor)) {
-			int r;
+int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
+{
+	int err;
 
-			list_add(&s->list, &slab_caches);
-			mutex_unlock(&slab_mutex);
-			r = sysfs_slab_add(s);
-			mutex_lock(&slab_mutex);
+	err = kmem_cache_open(s, flags);
+	if (err)
+		return err;
 
-			if (!r)
-				return s;
+	mutex_unlock(&slab_mutex);
+	err = sysfs_slab_add(s);
+	mutex_lock(&slab_mutex);
 
-			list_del(&s->list);
-			kmem_cache_close(s);
-		}
-		kfree(s);
-	}
-	kfree(n);
-	return NULL;
+	if (err)
+		kmem_cache_close(s);
+
+	return err;
 }
 
 #ifdef CONFIG_SMP
@@ -5225,14 +5201,6 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 	return err;
 }
 
-static void kmem_cache_release(struct kobject *kobj)
-{
-	struct kmem_cache *s = to_slab(kobj);
-
-	kfree(s->name);
-	kfree(s);
-}
-
 static const struct sysfs_ops slab_sysfs_ops = {
 	.show = slab_attr_show,
 	.store = slab_attr_store,
@@ -5240,7 +5208,6 @@ static const struct sysfs_ops slab_sysfs_ops = {
 
 static struct kobj_type slab_ktype = {
 	.sysfs_ops = &slab_sysfs_ops,
-	.release = kmem_cache_release
 };
 
 static int uevent_filter(struct kset *kset, struct kobject *kobj)
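Note that the caller-visible API is untouched by all of the above; modules create and destroy caches exactly as before, only the refcounting and the __kmem_cache_shutdown() call now happen once in common code instead of per allocator. A hypothetical user, for reference (the "foo" names are invented for illustration):

	/* Hypothetical module-side usage; unchanged by this merge. */
	struct foo { int a, b; };
	static struct kmem_cache *foo_cachep;

	static int __init foo_init(void)
	{
		foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
					       0, SLAB_HWCACHE_ALIGN, NULL);
		if (!foo_cachep)
			return -ENOMEM;
		return 0;
	}

	static void __exit foo_exit(void)
	{
		/* common code drops the refcount and, at zero, calls the
		 * allocator's __kmem_cache_shutdown() */
		kmem_cache_destroy(foo_cachep);
	}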