Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux

Pull slab updates from Pekka Enberg:
 "Mainly a bunch of SLUB fixes from Joonsoo Kim"

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  slub: use __SetPageSlab function to set PG_slab flag
  slub: fix a memory leak in get_partial_node()
  slub: remove unused argument of init_kmem_cache_node()
  slub: fix a possible memory leak
  Documentations: Fix slabinfo.c directory in vm/slub.txt
  slub: fix incorrect return type of get_any_partial()
commit af4f8ba31a
Documentation/vm/slub.txt:

@@ -17,7 +17,7 @@ data and perform operation on the slabs. By default slabinfo only lists
 slabs that have data in them. See "slabinfo -h" for more options when
 running the command. slabinfo can be compiled with
 
-gcc -o slabinfo tools/slub/slabinfo.c
+gcc -o slabinfo tools/vm/slabinfo.c
 
 Some of the modes of operation of slabinfo require that slub debugging
 be enabled on the command line. F.e. no tracking information will be
mm/slub.c (23 changed lines):
@@ -1369,7 +1369,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
-	page->flags |= 1 << PG_slab;
+	__SetPageSlab(page);
 
 	start = page_address(page);
 
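For context, __SetPageSlab() is the non-atomic page-flag setter generated by the __SETPAGEFLAG() macro in include/linux/page-flags.h. A minimal sketch of what the generated helper amounts to (simplified, not the literal macro expansion):

/*
 * Sketch only; the real definition is generated by
 * __SETPAGEFLAG(Slab, slab) in include/linux/page-flags.h.
 */
static inline void __SetPageSlab(struct page *page)
{
	/*
	 * Non-atomic set, fine here: the page was just allocated and
	 * is not yet visible to other CPUs.
	 */
	__set_bit(PG_slab, &page->flags);
}

The effect is the same as the open-coded "page->flags |= 1 << PG_slab", but the standard helper documents the non-atomic intent and keeps page-flag manipulation greppable.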
@@ -1514,15 +1514,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
 		freelist = page->freelist;
 		counters = page->counters;
 		new.counters = counters;
-		if (mode)
+		if (mode) {
 			new.inuse = page->objects;
+			new.freelist = NULL;
+		} else {
+			new.freelist = freelist;
+		}
 
 		VM_BUG_ON(new.frozen);
 		new.frozen = 1;
 
 	} while (!__cmpxchg_double_slab(s, page,
 			freelist, counters,
-			NULL, new.counters,
+			new.freelist, new.counters,
 			"lock and freeze"));
 
 	remove_partial(n, page);
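The retry loop publishes the frozen state and the new freelist in one atomic step. A rough sketch of the helper's contract (an assumption-laden simplification: the real kernel helper also has a list_lock-based fallback for CPUs without a double-word cmpxchg):

/*
 * Contract sketch only, not the kernel implementation. Atomically:
 *
 *	if (page->freelist == freelist_old && page->counters == counters_old) {
 *		page->freelist = freelist_new;
 *		page->counters = counters_new;
 *		return true;
 *	}
 *	return false;
 */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n);

Before this change the cmpxchg always installed NULL as the new freelist, even when the slab was only being taken for the per-cpu partial list (mode == 0). Now the freelist decision happens inside the atomic update: take everything (NULL) when freezing the slab for immediate allocation, keep the existing freelist otherwise.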
@@ -1564,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s,
 			object = t;
 			available = page->objects - page->inuse;
 		} else {
-			page->freelist = t;
 			available = put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
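Why the bare store was a leak (a plausible reading of the fix, not the commit's own wording): previously acquire_slab() zeroed page->freelist in its cmpxchg, and this line restored it with a plain, unsynchronized assignment. A hypothetical interleaving with a concurrent free, sketched in comments only:

/*
 * CPU0: get_partial_node()            CPU1: __slab_free()
 * ---------------------------         ------------------------------
 * t = acquire_slab(.., mode = 0);
 *   cmpxchg: page->freelist = NULL
 *                                     cmpxchg: page->freelist = obj
 *                                     (links a just-freed object)
 * page->freelist = t;
 *   plain store drops "obj": the freed object becomes unreachable
 */

With the acquire_slab() hunk above keeping the freelist inside the cmpxchg for mode == 0, the racy restore is unnecessary, so it is removed.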
@@ -1579,7 +1582,7 @@ static void *get_partial_node(struct kmem_cache *s,
 /*
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 		struct kmem_cache_cpu *c)
 {
 #ifdef CONFIG_NUMA
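The old return type was simply wrong: get_any_partial() returns whatever get_partial_node() hands back, and that is an object pointer, not a page. A sketch of the call chain that makes void * the right type (simplified from the mm/slub.c of this era; details may differ slightly by version):

static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
		struct kmem_cache_cpu *c)
{
	void *object;	/* an object pointer, never a struct page */
	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;

	object = get_partial_node(s, get_node(s, searchnode), c);
	if (object || node != NUMA_NO_NODE)
		return object;

	/* fall back to other nodes; same void * convention */
	return get_any_partial(s, flags, c);
}

The mismatch went unnoticed because the pointer was only ever passed through; fixing the declared type costs nothing at runtime.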
@@ -2766,7 +2769,7 @@ static unsigned long calculate_alignment(unsigned long flags,
 }
 
 static void
-init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
+init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
 	spin_lock_init(&n->list_lock);
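The dropped struct kmem_cache *s argument was never used in the body. For context, the whole function of this era is only a handful of field initializations (a sketch; the debug fields are guarded by CONFIG_SLUB_DEBUG and may vary slightly between versions):

static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
	n->nr_partial = 0;
	spin_lock_init(&n->list_lock);
	INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_set(&n->nr_slabs, 0);
	atomic_long_set(&n->total_objects, 0);
	INIT_LIST_HEAD(&n->full);
#endif
}

Nothing here touches the cache itself, so all three call sites below drop the extra argument.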
@@ -2836,7 +2839,7 @@ static void early_kmem_cache_node_alloc(int node)
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
-	init_kmem_cache_node(n, kmem_cache_node);
+	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
 	add_partial(n, page, DEACTIVATE_TO_HEAD);
@@ -2876,7 +2879,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
 		}
 
 		s->node[node] = n;
-		init_kmem_cache_node(n, s);
+		init_kmem_cache_node(n);
 	}
 	return 1;
 }
@@ -3625,7 +3628,7 @@ static int slab_mem_going_online_callback(void *arg)
 			ret = -ENOMEM;
 			goto out;
 		}
-		init_kmem_cache_node(n, s);
+		init_kmem_cache_node(n);
 		s->node[nid] = n;
 	}
 out:
@@ -3968,9 +3971,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 			}
 			return s;
 		}
-		kfree(n);
 		kfree(s);
 	}
+	kfree(n);
 err:
 	up_write(&slub_lock);
 
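The leak here: n holds a copy of the cache name, allocated with kstrdup() earlier in kmem_cache_create() (per the surrounding code of this era). With kfree(n) inside the if (s) { ... } block, the path where the kmalloc() of s itself fails never freed the string. A minimal sketch of the corrected shape (heavily simplified; the real function also handles list manipulation and sysfs registration):

	char *n = kstrdup(name, GFP_KERNEL);	/* private copy of the name */
	struct kmem_cache *s;

	if (!n)
		goto err;

	s = kmalloc(kmem_size, GFP_KERNEL);
	if (s) {
		if (kmem_cache_open(s, n, size, align, flags, ctor)) {
			/* success: the cache keeps "n" as its name */
			return s;
		}
		kfree(s);	/* open failed: drop the cache struct */
	}
	kfree(n);	/* now reached when open fails AND when kmalloc(s) fails */
err:
	up_write(&slub_lock);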