Slab allocators: Replace explicit zeroing with __GFP_ZERO
kmalloc_node() and kmem_cache_alloc_node() have not been available in zeroing variants until now. With __GFP_ZERO it is now possible to zero memory at allocation time, so use __GFP_ZERO and drop the explicit clearing via memset() wherever we can.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 94f6030ca7
parent 81cda66261
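For readers skimming the hunks below, the conversion pattern is sketched here as a minimal, hypothetical example (struct foo and foo_alloc_node() are invented for illustration and do not appear in the patched files):

	#include <linux/slab.h>

	/* Hypothetical object allocated per NUMA node. */
	struct foo {
		int refs;
		void *data;
	};

	static struct foo *foo_alloc_node(int node)
	{
		struct foo *f;

		/*
		 * Old pattern: allocate, check, then clear explicitly.
		 *
		 *	f = kmalloc_node(sizeof(*f), GFP_KERNEL, node);
		 *	if (!f)
		 *		return NULL;
		 *	memset(f, 0, sizeof(*f));
		 */

		/* New pattern: pass __GFP_ZERO so the allocator returns zeroed memory. */
		f = kmalloc_node(sizeof(*f), GFP_KERNEL | __GFP_ZERO, node);
		if (!f)
			return NULL;

		return f;
	}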
@@ -1322,10 +1322,9 @@ static void *as_init_queue(request_queue_t *q)
 {
 	struct as_data *ad;
 
-	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
+	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!ad)
 		return NULL;
-	memset(ad, 0, sizeof(*ad));
 
 	ad->q = q;	/* Identify what queue the data belongs to */
 
@@ -1251,9 +1251,9 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
 	struct cfq_io_context *cic;
 
-	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
+	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
+							cfqd->queue->node);
 	if (cic) {
-		memset(cic, 0, sizeof(*cic));
 		cic->last_end_request = jiffies;
 		INIT_LIST_HEAD(&cic->queue_list);
 		cic->dtor = cfq_free_io_context;
@@ -1376,17 +1376,19 @@ retry:
 			 * free memory.
 			 */
 			spin_unlock_irq(cfqd->queue->queue_lock);
-			new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
+			new_cfqq = kmem_cache_alloc_node(cfq_pool,
+					gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+					cfqd->queue->node);
 			spin_lock_irq(cfqd->queue->queue_lock);
 			goto retry;
 		} else {
-			cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
+			cfqq = kmem_cache_alloc_node(cfq_pool,
+					gfp_mask | __GFP_ZERO,
+					cfqd->queue->node);
 			if (!cfqq)
 				goto out;
 		}
 
-		memset(cfqq, 0, sizeof(*cfqq));
-
 		RB_CLEAR_NODE(&cfqq->rb_node);
 		INIT_LIST_HEAD(&cfqq->fifo);
 
@@ -2079,12 +2081,10 @@ static void *cfq_init_queue(request_queue_t *q)
 {
 	struct cfq_data *cfqd;
 
-	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
+	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!cfqd)
 		return NULL;
 
-	memset(cfqd, 0, sizeof(*cfqd));
-
 	cfqd->service_tree = CFQ_RB_ROOT;
 	INIT_LIST_HEAD(&cfqd->cic_list);
 
@@ -360,10 +360,9 @@ static void *deadline_init_queue(request_queue_t *q)
 {
 	struct deadline_data *dd;
 
-	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!dd)
 		return NULL;
-	memset(dd, 0, sizeof(*dd));
 
 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -177,11 +177,10 @@ static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
 	elevator_t *eq;
 	int i;
 
-	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
+	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (unlikely(!eq))
 		goto err;
 
-	memset(eq, 0, sizeof(*eq));
 	eq->ops = &e->ops;
 	eq->elevator_type = e;
 	kobject_init(&eq->kobj);
@@ -726,21 +726,21 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
 {
 	struct gendisk *disk;
 
-	disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
+	disk = kmalloc_node(sizeof(struct gendisk),
+				GFP_KERNEL | __GFP_ZERO, node_id);
 	if (disk) {
-		memset(disk, 0, sizeof(struct gendisk));
 		if (!init_disk_stats(disk)) {
 			kfree(disk);
 			return NULL;
 		}
 		if (minors > 1) {
 			int size = (minors - 1) * sizeof(struct hd_struct *);
-			disk->part = kmalloc_node(size, GFP_KERNEL, node_id);
+			disk->part = kmalloc_node(size,
+				GFP_KERNEL | __GFP_ZERO, node_id);
 			if (!disk->part) {
 				kfree(disk);
 				return NULL;
 			}
-			memset(disk->part, 0, size);
 		}
 		disk->minors = minors;
 		kobj_set_kset_s(disk,block_subsys);
@@ -1829,11 +1829,11 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	request_queue_t *q;
 
-	q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
+	q = kmem_cache_alloc_node(requestq_cachep,
+				gfp_mask | __GFP_ZERO, node_id);
 	if (!q)
 		return NULL;
 
-	memset(q, 0, sizeof(*q));
 	init_timer(&q->unplug_timer);
 
 	snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
@@ -1073,14 +1073,14 @@ static int init_irq (ide_hwif_t *hwif)
 		hwgroup->hwif->next = hwif;
 		spin_unlock_irq(&ide_lock);
 	} else {
-		hwgroup = kmalloc_node(sizeof(ide_hwgroup_t), GFP_KERNEL,
+		hwgroup = kmalloc_node(sizeof(ide_hwgroup_t),
+					GFP_KERNEL | __GFP_ZERO,
 					hwif_to_node(hwif->drives[0].hwif));
 		if (!hwgroup)
 			goto out_up;
 
 		hwif->hwgroup = hwgroup;
 
-		memset(hwgroup, 0, sizeof(ide_hwgroup_t));
 		hwgroup->hwif = hwif->next = hwif;
 		hwgroup->rq = NULL;
 		hwgroup->handler = NULL;
@@ -1221,7 +1221,8 @@ static int __devinit init_timers_cpu(int cpu)
 			/*
 			 * The APs use this path later in boot
 			 */
-			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
+			base = kmalloc_node(sizeof(*base),
+						GFP_KERNEL | __GFP_ZERO,
 						cpu_to_node(cpu));
 			if (!base)
 				return -ENOMEM;
@@ -1232,7 +1233,6 @@ static int __devinit init_timers_cpu(int cpu)
 				kfree(base);
 				return -ENOMEM;
 			}
-			memset(base, 0, sizeof(*base));
 			per_cpu(tvec_bases, cpu) = base;
 		} else {
 			/*
@@ -54,11 +54,10 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
 	int nbytes = sizeof(struct gen_pool_chunk) +
 				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
 
-	chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
 	if (unlikely(chunk == NULL))
 		return -1;
 
-	memset(chunk, 0, nbytes);
 	spin_lock_init(&chunk->lock);
 	chunk->start_addr = addr;
 	chunk->end_addr = addr + size;
@@ -53,12 +53,9 @@ void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
 	int node = cpu_to_node(cpu);
 
 	BUG_ON(pdata->ptrs[cpu]);
-	if (node_online(node)) {
-		/* FIXME: kzalloc_node(size, gfp, node) */
-		pdata->ptrs[cpu] = kmalloc_node(size, gfp, node);
-		if (pdata->ptrs[cpu])
-			memset(pdata->ptrs[cpu], 0, size);
-	} else
+	if (node_online(node))
+		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
+	else
 		pdata->ptrs[cpu] = kzalloc(size, gfp);
 	return pdata->ptrs[cpu];
 }
@@ -62,10 +62,9 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
 			mempool_free_t *free_fn, void *pool_data, int node_id)
 {
 	mempool_t *pool;
-	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
+	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
 	if (!pool)
 		return NULL;
-	memset(pool, 0, sizeof(*pool));
 	pool->elements = kmalloc_node(min_nr * sizeof(void *),
 					GFP_KERNEL, node_id);
 	if (!pool->elements) {
@@ -432,11 +432,12 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
+		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+					PAGE_KERNEL, node);
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_LEVEL_MASK),
+				(gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
 				node);
 	}
 	area->pages = pages;
@@ -445,7 +446,6 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		kfree(area);
 		return NULL;
 	}
-	memset(area->pages, 0, array_size);
 
 	for (i = 0; i < area->nr_pages; i++) {
 		if (node < 0)