slub: use new node functions

Make use of the new node functions in mm/slab.h to reduce code size and
simplify.
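
The helpers in question come from the parent change (44c5356fb4), which added
kmem_cache_node accessors to mm/slab.h. As a rough sketch of their shape
(paraphrased here for context, not quoted verbatim from that commit):

    /* Return the node's kmem_cache_node, or NULL if none is allocated. */
    static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
    {
            return s->node[node];
    }

    /*
     * Iterate over every node that actually has a kmem_cache_node
     * structure allocated; NULL entries in s->node[] are skipped by
     * the iterator itself.
     */
    #define for_each_kmem_cache_node(__s, __node, __n) \
            for (__node = 0; __node < nr_node_ids; __node++) \
                    if ((__n = get_node(__s, __node)))

Because the iterator only yields nodes whose kmem_cache_node is allocated,
every open-coded "if (!n) continue;" check in the loops below becomes
redundant and is removed.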

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Christoph Lameter, 2014-08-06 16:04:09 -07:00, committed by Linus Torvalds
Parent: 44c5356fb4
Commit: fa45dc254b
1 changed file with 29 additions and 49 deletions

@@ -2157,6 +2157,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 	int node;
+	struct kmem_cache_node *n;
 
 	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
 		return;
@@ -2171,15 +2172,11 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 		pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
 			s->name);
 
-	for_each_online_node(node) {
-		struct kmem_cache_node *n = get_node(s, node);
+	for_each_kmem_cache_node(s, node, n) {
 		unsigned long nr_slabs;
 		unsigned long nr_objs;
 		unsigned long nr_free;
 
-		if (!n)
-			continue;
-
 		nr_free  = count_partial(n, count_free);
 		nr_slabs = node_nr_slabs(n);
 		nr_objs  = node_nr_objs(n);
@@ -2923,13 +2920,10 @@ static void early_kmem_cache_node_alloc(int node)
 static void free_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
+	struct kmem_cache_node *n;
 
-	for_each_node_state(node, N_NORMAL_MEMORY) {
-		struct kmem_cache_node *n = s->node[node];
-
-		if (n)
-			kmem_cache_free(kmem_cache_node, n);
-
+	for_each_kmem_cache_node(s, node, n) {
+		kmem_cache_free(kmem_cache_node, n);
 		s->node[node] = NULL;
 	}
 }
@@ -3217,12 +3211,11 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 static inline int kmem_cache_close(struct kmem_cache *s)
 {
 	int node;
+	struct kmem_cache_node *n;
 
 	flush_all(s);
 	/* Attempt to free all objects */
-	for_each_node_state(node, N_NORMAL_MEMORY) {
-		struct kmem_cache_node *n = get_node(s, node);
-
+	for_each_kmem_cache_node(s, node, n) {
 		free_partial(s, n);
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
@@ -3407,9 +3400,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		return -ENOMEM;
 
 	flush_all(s);
-	for_each_node_state(node, N_NORMAL_MEMORY) {
-		n = get_node(s, node);
-
+	for_each_kmem_cache_node(s, node, n) {
 		if (!n->nr_partial)
 			continue;
 
@@ -3581,6 +3572,7 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 {
 	int node;
 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+	struct kmem_cache_node *n;
 
 	memcpy(s, static_cache, kmem_cache->object_size);
 
@@ -3590,19 +3582,16 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 	 * IPIs around.
 	 */
 	__flush_cpu_slab(s, smp_processor_id());
-	for_each_node_state(node, N_NORMAL_MEMORY) {
-		struct kmem_cache_node *n = get_node(s, node);
+	for_each_kmem_cache_node(s, node, n) {
 		struct page *p;
 
-		if (n) {
-			list_for_each_entry(p, &n->partial, lru)
-				p->slab_cache = s;
+		list_for_each_entry(p, &n->partial, lru)
+			p->slab_cache = s;
 
 #ifdef CONFIG_SLUB_DEBUG
-			list_for_each_entry(p, &n->full, lru)
-				p->slab_cache = s;
+		list_for_each_entry(p, &n->full, lru)
+			p->slab_cache = s;
 #endif
-		}
 	}
 	list_add(&s->list, &slab_caches);
 	return s;
@@ -3955,16 +3944,14 @@ static long validate_slab_cache(struct kmem_cache *s)
 	unsigned long count = 0;
 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
 				sizeof(unsigned long), GFP_KERNEL);
+	struct kmem_cache_node *n;
 
 	if (!map)
 		return -ENOMEM;
 
 	flush_all(s);
-	for_each_node_state(node, N_NORMAL_MEMORY) {
-		struct kmem_cache_node *n = get_node(s, node);
-
+	for_each_kmem_cache_node(s, node, n)
 		count += validate_slab_node(s, n, map);
-	}
 	kfree(map);
 	return count;
 }
@@ -4118,6 +4105,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	int node;
 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
 				     sizeof(unsigned long), GFP_KERNEL);
+	struct kmem_cache_node *n;
 
 	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
 				     GFP_TEMPORARY)) {
@@ -4127,8 +4115,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	/* Push back cpu slabs */
 	flush_all(s);
 
-	for_each_node_state(node, N_NORMAL_MEMORY) {
-		struct kmem_cache_node *n = get_node(s, node);
+	for_each_kmem_cache_node(s, node, n) {
 		unsigned long flags;
 		struct page *page;
 
@@ -4327,8 +4314,9 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 	get_online_mems();
 #ifdef CONFIG_SLUB_DEBUG
 	if (flags & SO_ALL) {
-		for_each_node_state(node, N_NORMAL_MEMORY) {
-			struct kmem_cache_node *n = get_node(s, node);
+		struct kmem_cache_node *n;
+
+		for_each_kmem_cache_node(s, node, n) {
 
 			if (flags & SO_TOTAL)
 				x = atomic_long_read(&n->total_objects);
@@ -4344,9 +4332,9 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 	} else
 #endif
 		if (flags & SO_PARTIAL) {
-		for_each_node_state(node, N_NORMAL_MEMORY) {
-			struct kmem_cache_node *n = get_node(s, node);
+		struct kmem_cache_node *n;
 
+		for_each_kmem_cache_node(s, node, n) {
 			if (flags & SO_TOTAL)
 				x = count_partial(n, count_total);
 			else if (flags & SO_OBJECTS)
@@ -4359,7 +4347,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 	}
 	x = sprintf(buf, "%lu", total);
 #ifdef CONFIG_NUMA
-	for_each_node_state(node, N_NORMAL_MEMORY)
+	for (node = 0; node < nr_node_ids; node++)
 		if (nodes[node])
 			x += sprintf(buf + x, " N%d=%lu",
 					node, nodes[node]);
@@ -4373,16 +4361,12 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 static int any_slab_objects(struct kmem_cache *s)
 {
 	int node;
+	struct kmem_cache_node *n;
 
-	for_each_online_node(node) {
-		struct kmem_cache_node *n = get_node(s, node);
-
-		if (!n)
-			continue;
-
+	for_each_kmem_cache_node(s, node, n)
 		if (atomic_long_read(&n->total_objects))
 			return 1;
-	}
+
 	return 0;
 }
 #endif
@@ -5337,13 +5321,9 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
 	unsigned long nr_objs = 0;
 	unsigned long nr_free = 0;
 	int node;
+	struct kmem_cache_node *n;
 
-	for_each_online_node(node) {
-		struct kmem_cache_node *n = get_node(s, node);
-
-		if (!n)
-			continue;
-
+	for_each_kmem_cache_node(s, node, n) {
 		nr_slabs += node_nr_slabs(n);
 		nr_objs += node_nr_objs(n);
 		nr_free += count_partial(n, count_free);