slub: cure list_slab_objects() from double fix
According to Christopher Lameter two fixes have been merged for the same
problem. As far as I can tell, the code does not acquire the list_lock
and invoke kmalloc(). list_slab_objects() misses an unlock (the
counterpart to get_map()) and the memory allocated in free_partial()
isn't used.
Revert the mentioned commit.
Link: http://lkml.kernel.org/r/20200618201234.795692-1-bigeasy@linutronix.de
Fixes: aa456c7aeb ("slub: remove kmalloc under list_lock from list_slab_objects() V2")
Link: https://lkml.kernel.org/r/alpine.DEB.2.22.394.2006181501480.12014@www.lameter.com
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent 8982ae527f
commit 55860d96ca
Changed file: mm/slub.c (19 lines changed)
|
@@ -3766,15 +3766,13 @@ error:
|
||||||
}
|
}
|
||||||
|
|
||||||
static void list_slab_objects(struct kmem_cache *s, struct page *page,
|
static void list_slab_objects(struct kmem_cache *s, struct page *page,
|
||||||
const char *text, unsigned long *map)
|
const char *text)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_SLUB_DEBUG
|
#ifdef CONFIG_SLUB_DEBUG
|
||||||
void *addr = page_address(page);
|
void *addr = page_address(page);
|
||||||
|
unsigned long *map;
|
||||||
void *p;
|
void *p;
|
||||||
|
|
||||||
if (!map)
|
|
||||||
return;
|
|
||||||
|
|
||||||
slab_err(s, page, text, s->name);
|
slab_err(s, page, text, s->name);
|
||||||
slab_lock(page);
|
slab_lock(page);
|
||||||
|
|
||||||
|
@@ -3786,6 +3784,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
|
||||||
print_tracking(s, p);
|
print_tracking(s, p);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
put_map(map);
|
||||||
slab_unlock(page);
|
slab_unlock(page);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
@@ -3799,11 +3798,6 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
|
||||||
{
|
{
|
||||||
LIST_HEAD(discard);
|
LIST_HEAD(discard);
|
||||||
struct page *page, *h;
|
struct page *page, *h;
|
||||||
unsigned long *map = NULL;
|
|
||||||
|
|
||||||
#ifdef CONFIG_SLUB_DEBUG
|
|
||||||
map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
BUG_ON(irqs_disabled());
|
BUG_ON(irqs_disabled());
|
||||||
spin_lock_irq(&n->list_lock);
|
spin_lock_irq(&n->list_lock);
|
||||||
|
@@ -3813,16 +3807,11 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
|
||||||
list_add(&page->slab_list, &discard);
|
list_add(&page->slab_list, &discard);
|
||||||
} else {
|
} else {
|
||||||
list_slab_objects(s, page,
|
list_slab_objects(s, page,
|
||||||
"Objects remaining in %s on __kmem_cache_shutdown()",
|
"Objects remaining in %s on __kmem_cache_shutdown()");
|
||||||
map);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
spin_unlock_irq(&n->list_lock);
|
spin_unlock_irq(&n->list_lock);
|
||||||
|
|
||||||
#ifdef CONFIG_SLUB_DEBUG
|
|
||||||
bitmap_free(map);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
list_for_each_entry_safe(page, h, &discard, slab_list)
|
list_for_each_entry_safe(page, h, &discard, slab_list)
|
||||||
discard_slab(s, page);
|
discard_slab(s, page);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue