mm/slub: Move the stackdepot related allocation out of IRQ-off section.
The set_track() invocation in free_debug_processing() happens with
slab_lock() acquired. That lock disables interrupts on PREEMPT_RT, which
forbids the memory allocation done in stack_depot_save().
Split set_track() into two parts: set_track_prepare(), which allocates
memory, and set_track_update(), which only performs the assignment of the
trace data structure. Use set_track_prepare() before disabling
interrupts.
[ vbabka@suse.cz: make set_track() call set_track_update() instead of
open-coded assignments ]
Fixes: 5cf909c553 ("mm/slub: use stackdepot to save stack trace in objects")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Link: https://lore.kernel.org/r/Yp9sqoUi4fVa5ExF@linutronix.de
parent b13baccc38
commit c4cf678559

 mm/slub.c | 41
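As an illustration of the resulting ordering, here is a condensed, hypothetical sketch of a free path using the split helpers. It is not the literal free_debug_processing() body from the hunks below; the function name free_debug_sketch and the locals n/flags/flags2 are stand-ins for the real function's parameters and locals, and error handling is omitted:

/* Hypothetical, condensed free path; mirrors the ordering in the hunks below. */
static void free_debug_sketch(struct kmem_cache *s, struct kmem_cache_node *n,
			      struct slab *slab, void *object, unsigned long addr)
{
	depot_stack_handle_t handle = 0;
	unsigned long flags, flags2;

	/*
	 * stack_depot_save() may allocate memory, so take the stack trace
	 * while interrupts are still enabled.
	 */
	if (s->flags & SLAB_STORE_USER)
		handle = set_track_prepare();

	spin_lock_irqsave(&n->list_lock, flags);	/* IRQs off from here */
	slab_lock(slab, &flags2);

	/*
	 * Only assigns the already-saved handle; nothing is allocated
	 * inside the IRQ-off section.
	 */
	if (s->flags & SLAB_STORE_USER)
		set_track_update(s, object, TRACK_FREE, addr, handle);

	slab_unlock(slab, &flags2);
	spin_unlock_irqrestore(&n->list_lock, flags);
}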
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -726,25 +726,48 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 	return kasan_reset_tag(p + alloc);
 }
 
-static void noinline set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, unsigned long addr)
-{
-	struct track *p = get_track(s, object, alloc);
-
 #ifdef CONFIG_STACKDEPOT
+static noinline depot_stack_handle_t set_track_prepare(void)
+{
+	depot_stack_handle_t handle;
 	unsigned long entries[TRACK_ADDRS_COUNT];
 	unsigned int nr_entries;
 
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
-	p->handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+
+	return handle;
+}
+#else
+static inline depot_stack_handle_t set_track_prepare(void)
+{
+	return 0;
+}
 #endif
 
+static void set_track_update(struct kmem_cache *s, void *object,
+			enum track_item alloc, unsigned long addr,
+			depot_stack_handle_t handle)
+{
+	struct track *p = get_track(s, object, alloc);
+
+#ifdef CONFIG_STACKDEPOT
+	p->handle = handle;
+#endif
 	p->addr = addr;
 	p->cpu = smp_processor_id();
 	p->pid = current->pid;
 	p->when = jiffies;
 }
 
+static __always_inline void set_track(struct kmem_cache *s, void *object,
+			enum track_item alloc, unsigned long addr)
+{
+	depot_stack_handle_t handle = set_track_prepare();
+
+	set_track_update(s, object, alloc, addr, handle);
+}
+
 static void init_tracking(struct kmem_cache *s, void *object)
 {
 	struct track *p;
@@ -1373,6 +1396,10 @@ static noinline int free_debug_processing(
 	int cnt = 0;
 	unsigned long flags, flags2;
 	int ret = 0;
+	depot_stack_handle_t handle = 0;
+
+	if (s->flags & SLAB_STORE_USER)
+		handle = set_track_prepare();
 
 	spin_lock_irqsave(&n->list_lock, flags);
 	slab_lock(slab, &flags2);
@@ -1391,7 +1418,7 @@ next_object:
 	}
 
 	if (s->flags & SLAB_STORE_USER)
-		set_track(s, object, TRACK_FREE, addr);
+		set_track_update(s, object, TRACK_FREE, addr, handle);
 	trace(s, slab, object, 0);
 	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
 	init_object(s, object, SLUB_RED_INACTIVE);