Merge branch 'core-stacktrace-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull stack trace updates from Ingo Molnar:
 "So Thomas looked at the stacktrace code recently and noticed a few
  weirdnesses, and we all know how such stories of crummy kernel code
  meeting German engineering perfection end: a 45-patch series to clean
  it all up! :-)

  Here are the changes in Thomas's words:

   'Struct stack_trace is a sinkhole for input and output parameters,
    which is largely pointless for most usage sites. In fact, if embedded
    into other data structures, it creates indirections and extra storage
    overhead for no benefit.

    Looking at all usage sites makes it clear that they just require an
    interface which is based on a storage array. That array is either on
    the stack, global, or embedded into some other data structure.

    Some of the stack depot usage sites are outright wrong, but
    fortunately the wrongness just causes more stack to be used for
    nothing and has no functional impact.

    Another oddity is the inconsistent termination of the stack trace
    with ULONG_MAX. It's pointless, as the number of entries is what
    determines the length of the stored trace. In fact quite a few call
    sites remove the ULONG_MAX marker afterwards, with or without nasty
    comments about it. Not all architectures add that terminator, and
    those which do are inconsistent about it, terminating either
    conditionally on nr_entries == 0 or unconditionally.

    The following series cleans that up by:

      1) Removing the ULONG_MAX termination in the architecture code

      2) Removing the ULONG_MAX fixups at the call sites

      3) Providing plain storage array based interfaces for stacktrace
         and stackdepot.

      4) Cleaning up the mess at the call sites, including some related
         cleanups.

      5) Removing the struct stack_trace based interfaces

    This does not change the struct stack_trace interfaces at the
    architecture level, but it removes their exposure to the generic
    code'"

* 'core-stacktrace-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (45 commits)
  x86/stacktrace: Use common infrastructure
  stacktrace: Provide common infrastructure
  lib/stackdepot: Remove obsolete functions
  stacktrace: Remove obsolete functions
  livepatch: Simplify stack trace retrieval
  tracing: Remove the last struct stack_trace usage
  tracing: Simplify stack trace retrieval
  tracing: Make ftrace_trace_userstack() static and conditional
  tracing: Use percpu stack trace buffer more intelligently
  tracing: Simplify stacktrace retrieval in histograms
  lockdep: Simplify stack trace handling
  lockdep: Remove save argument from check_prev_add()
  lockdep: Remove unused trace argument from print_circular_bug()
  drm: Simplify stacktrace handling
  dm persistent data: Simplify stack trace handling
  dm bufio: Simplify stack trace retrieval
  btrfs: ref-verify: Simplify stack trace retrieval
  dma/debug: Simplify stracktrace retrieval
  fault-inject: Simplify stacktrace retrieval
  mm/page_owner: Simplify stack trace handling
  ...
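
To illustrate the conversion described above, here is a minimal sketch of a typical call site before and after the series. The record_stack_old()/record_stack_new() helpers, the array size and the skip count are invented for illustration; struct stack_trace, save_stack_trace() and stack_trace_save() are the interfaces visible in the diff below.

#include <linux/kernel.h>
#include <linux/stacktrace.h>

#define NR_ENTRIES 16

/* Old pattern: struct stack_trace bundles the storage pointer, the
 * capacity, the skip count and the result, and the caller may need to
 * strip a trailing ULONG_MAX terminator added by the architecture. */
static unsigned int record_stack_old(unsigned long *storage)
{
	struct stack_trace trace = {
		.entries	= storage,
		.max_entries	= NR_ENTRIES,
		.skip		= 2,
	};

	save_stack_trace(&trace);
	if (trace.nr_entries &&
	    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
		trace.nr_entries--;
	return trace.nr_entries;
}

/* New pattern: plain storage array in, number of stored entries out. */
static unsigned int record_stack_new(unsigned long *storage)
{
	return stack_trace_save(storage, NR_ENTRIES, 2);
}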
Linus Torvalds 2019-05-06 13:11:48 -07:00
commit 2c6a392cdd
39 changed files with 694 additions and 656 deletions
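
The same storage-array approach applies to the stack depot (point 3 in the description above). Here is a similarly hedged sketch of the depot pattern that the drm hunks in the diff below follow; the save_alloc_stack()/print_alloc_stack() helpers and the 16-entry array are illustrative, while stack_trace_save(), stack_depot_save(), stack_depot_fetch() and stack_trace_print() are the new interfaces.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Save: capture the trace into a local array, then hand the used part
 * of that array to the depot, which returns a compact handle. */
static depot_stack_handle_t save_alloc_stack(void)
{
	unsigned long entries[16];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save(entries, n, GFP_NOWAIT);
}

/* Fetch: the depot hands back a pointer to its stored array plus the
 * number of entries; no caller-side struct or ULONG_MAX fixup needed. */
static void print_alloc_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr_entries, 0);
}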


@ -115,8 +115,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
* running on another CPU? For now, ignore it as we
* can't guarantee we won't explode.
*/
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
return;
#else
frame.fp = thread_saved_fp(tsk);
@ -134,8 +132,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
}
walk_stackframe(&frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
@ -153,8 +149,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
frame.pc = regs->ARM_pc;
walk_stackframe(&frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)


@ -140,8 +140,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
#endif
walk_stackframe(current, &frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
@ -172,8 +170,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
#endif
walk_stackframe(tsk, &frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
put_task_stack(tsk);
}


@ -29,22 +29,17 @@ static void dump_trace(struct task_struct *task, struct stack_trace *trace)
}
}
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
void save_stack_trace(struct stack_trace *trace)
{
dump_trace(current, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
dump_trace(tsk, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);


@ -169,8 +169,6 @@ static bool save_trace(unsigned long pc, void *arg)
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
walk_stackframe(tsk, NULL, save_trace, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);


@ -45,8 +45,6 @@ void save_stack_trace(struct stack_trace *trace)
sp = current_stack_pointer();
dump_trace(save_address, trace, NULL, sp);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
@ -58,8 +56,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
if (tsk == current)
sp = current_stack_pointer();
dump_trace(save_address_nosched, trace, tsk, sp);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
@ -69,7 +65,5 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
sp = kernel_stack_pointer(regs);
dump_trace(save_address, trace, NULL, sp);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);


@ -49,8 +49,6 @@ void save_stack_trace(struct stack_trace *trace)
unsigned long *sp = (unsigned long *)current_stack_pointer;
unwind_stack(current, NULL, sp, &save_stack_ops, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
@ -84,7 +82,5 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
unsigned long *sp = (unsigned long *)tsk->thread.sp;
unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);


@ -63,8 +63,6 @@ static const struct stacktrace_ops dump_ops = {
static void __save_stack_trace(struct task_struct *tsk, struct stack_trace *trace)
{
dump_trace(tsk, &dump_ops, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace(struct stack_trace *trace)


@ -120,8 +120,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
}
walk_stackframe(&frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace(struct stack_trace *trace)


@ -74,6 +74,7 @@ config X86
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_STACKWALK
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64


@ -12,78 +12,31 @@
#include <asm/stacktrace.h>
#include <asm/unwind.h>
static int save_stack_address(struct stack_trace *trace, unsigned long addr,
bool nosched)
{
if (nosched && in_sched_functions(addr))
return 0;
if (trace->skip > 0) {
trace->skip--;
return 0;
}
if (trace->nr_entries >= trace->max_entries)
return -1;
trace->entries[trace->nr_entries++] = addr;
return 0;
}
static void noinline __save_stack_trace(struct stack_trace *trace,
struct task_struct *task, struct pt_regs *regs,
bool nosched)
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
struct unwind_state state;
unsigned long addr;
if (regs)
save_stack_address(trace, regs->ip, nosched);
if (regs && !consume_entry(cookie, regs->ip, false))
return;
for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
if (!addr || save_stack_address(trace, addr, nosched))
if (!addr || !consume_entry(cookie, addr, false))
break;
}
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
/*
* Save stack-backtrace addresses into a stack_trace buffer.
* This function returns an error if it detects any unreliable features of the
* stack. Otherwise it guarantees that the stack trace is reliable.
*
* If the task is not 'current', the caller *must* ensure the task is inactive.
*/
void save_stack_trace(struct stack_trace *trace)
{
trace->skip++;
__save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
__save_stack_trace(trace, current, regs, false);
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
if (!try_get_task_stack(tsk))
return;
if (tsk == current)
trace->skip++;
__save_stack_trace(trace, tsk, NULL, true);
put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
static int __always_inline
__save_stack_trace_reliable(struct stack_trace *trace,
struct task_struct *task)
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task)
{
struct unwind_state state;
struct pt_regs *regs;
@ -97,7 +50,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
if (regs) {
/* Success path for user tasks */
if (user_mode(regs))
goto success;
return 0;
/*
* Kernel mode registers on the stack indicate an
@ -120,7 +73,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
if (!addr)
return -EINVAL;
if (save_stack_address(trace, addr, false))
if (!consume_entry(cookie, addr, false))
return -EINVAL;
}
@ -132,39 +85,9 @@ __save_stack_trace_reliable(struct stack_trace *trace,
if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
return -EINVAL;
success:
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
return 0;
}
/*
* This function returns an error if it detects any unreliable features of the
* stack. Otherwise it guarantees that the stack trace is reliable.
*
* If the task is not 'current', the caller *must* ensure the task is inactive.
*/
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
struct stack_trace *trace)
{
int ret;
/*
* If the task doesn't have a stack (e.g., a zombie), the stack is
* "reliably" empty.
*/
if (!try_get_task_stack(tsk))
return 0;
ret = __save_stack_trace_reliable(trace, tsk);
put_task_stack(tsk);
return ret;
}
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
struct stack_frame_user {
@ -189,15 +112,15 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
return ret;
}
static inline void __save_stack_trace_user(struct stack_trace *trace)
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
const struct pt_regs *regs)
{
const struct pt_regs *regs = task_pt_regs(current);
const void __user *fp = (const void __user *)regs->bp;
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = regs->ip;
if (!consume_entry(cookie, regs->ip, false))
return;
while (trace->nr_entries < trace->max_entries) {
while (1) {
struct stack_frame_user frame;
frame.next_fp = NULL;
@ -207,8 +130,8 @@ static inline void __save_stack_trace_user(struct stack_trace *trace)
if ((unsigned long)fp < regs->sp)
break;
if (frame.ret_addr) {
trace->entries[trace->nr_entries++] =
frame.ret_addr;
if (!consume_entry(cookie, frame.ret_addr, false))
return;
}
if (fp == frame.next_fp)
break;
@ -216,14 +139,3 @@ static inline void __save_stack_trace_user(struct stack_trace *trace)
}
}
void save_stack_trace_user(struct stack_trace *trace)
{
/*
* Trace user stack if we are not a kernel thread
*/
if (current->mm) {
__save_stack_trace_user(trace);
}
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}


@ -106,25 +106,19 @@
static noinline void save_stack(struct drm_mm_node *node)
{
unsigned long entries[STACKDEPTH];
struct stack_trace trace = {
.entries = entries,
.max_entries = STACKDEPTH,
.skip = 1
};
unsigned int n;
save_stack_trace(&trace);
if (trace.nr_entries != 0 &&
trace.entries[trace.nr_entries-1] == ULONG_MAX)
trace.nr_entries--;
n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
/* May be called under spinlock, so avoid sleeping */
node->stack = depot_save_stack(&trace, GFP_NOWAIT);
node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}
static void show_leaks(struct drm_mm *mm)
{
struct drm_mm_node *node;
unsigned long entries[STACKDEPTH];
unsigned long *entries;
unsigned int nr_entries;
char *buf;
buf = kmalloc(BUFSZ, GFP_KERNEL);
@ -132,19 +126,14 @@ static void show_leaks(struct drm_mm *mm)
return;
list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
struct stack_trace trace = {
.entries = entries,
.max_entries = STACKDEPTH
};
if (!node->stack) {
DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
node->start, node->size);
continue;
}
depot_fetch_stack(node->stack, &trace);
snprint_stack_trace(buf, BUFSZ, &trace, 0);
nr_entries = stack_depot_fetch(node->stack, &entries);
stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
node->start, node->size, buf);
}


@ -36,11 +36,8 @@
static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
unsigned long entries[12];
struct stack_trace trace = {
.entries = entries,
.max_entries = ARRAY_SIZE(entries),
};
unsigned long *entries;
unsigned int nr_entries;
char buf[512];
if (!vma->node.stack) {
@ -49,8 +46,8 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
return;
}
depot_fetch_stack(vma->node.stack, &trace);
snprint_stack_trace(buf, sizeof(buf), &trace, 0);
nr_entries = stack_depot_fetch(vma->node.stack, &entries);
stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
vma->node.start, vma->node.size, reason, buf);
}


@ -60,31 +60,20 @@
static noinline depot_stack_handle_t __save_depot_stack(void)
{
unsigned long entries[STACKDEPTH];
struct stack_trace trace = {
.entries = entries,
.max_entries = ARRAY_SIZE(entries),
.skip = 1,
};
unsigned int n;
save_stack_trace(&trace);
if (trace.nr_entries &&
trace.entries[trace.nr_entries - 1] == ULONG_MAX)
trace.nr_entries--;
return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}
static void __print_depot_stack(depot_stack_handle_t stack,
char *buf, int sz, int indent)
{
unsigned long entries[STACKDEPTH];
struct stack_trace trace = {
.entries = entries,
.max_entries = ARRAY_SIZE(entries),
};
unsigned long *entries;
unsigned int nr_entries;
depot_fetch_stack(stack, &trace);
snprint_stack_trace(buf, sz, &trace, indent);
nr_entries = stack_depot_fetch(stack, &entries);
stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}
static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)


@ -150,7 +150,7 @@ struct dm_buffer {
void (*end_io)(struct dm_buffer *, blk_status_t);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
struct stack_trace stack_trace;
unsigned int stack_len;
unsigned long stack_entries[MAX_STACK];
#endif
};
@ -232,11 +232,7 @@ static DEFINE_MUTEX(dm_bufio_clients_lock);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
b->stack_trace.nr_entries = 0;
b->stack_trace.max_entries = MAX_STACK;
b->stack_trace.entries = b->stack_entries;
b->stack_trace.skip = 2;
save_stack_trace(&b->stack_trace);
b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif
@ -438,7 +434,7 @@ static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
adjust_total_allocated(b->data_mode, (long)c->block_size);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
memset(&b->stack_trace, 0, sizeof(b->stack_trace));
b->stack_len = 0;
#endif
return b;
}
@ -1520,8 +1516,9 @@ static void drop_buffers(struct dm_bufio_client *c)
DMERR("leaked buffer %llx, hold count %u, list %d",
(unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
print_stack_trace(&b->stack_trace, 1);
b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
stack_trace_print(b->stack_entries, b->stack_len, 1);
/* mark unclaimed to avoid BUG_ON below */
b->hold_count = 0;
#endif
}


@ -35,7 +35,10 @@
#define MAX_HOLDERS 4
#define MAX_STACK 10
typedef unsigned long stack_entries[MAX_STACK];
struct stack_store {
unsigned int nr_entries;
unsigned long entries[MAX_STACK];
};
struct block_lock {
spinlock_t lock;
@ -44,8 +47,7 @@ struct block_lock {
struct task_struct *holders[MAX_HOLDERS];
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
struct stack_trace traces[MAX_HOLDERS];
stack_entries entries[MAX_HOLDERS];
struct stack_store traces[MAX_HOLDERS];
#endif
};
@ -73,7 +75,7 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
{
unsigned h = __find_holder(lock, NULL);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
struct stack_trace *t;
struct stack_store *t;
#endif
get_task_struct(task);
@ -81,11 +83,7 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
t = lock->traces + h;
t->nr_entries = 0;
t->max_entries = MAX_STACK;
t->entries = lock->entries[h];
t->skip = 2;
save_stack_trace(t);
t->nr_entries = stack_trace_save(t->entries, MAX_STACK, 2);
#endif
}
@ -106,7 +104,8 @@ static int __check_holder(struct block_lock *lock)
DMERR("recursive lock detected in metadata");
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
DMERR("previously held here:");
print_stack_trace(lock->traces + i, 4);
stack_trace_print(lock->traces[i].entries,
lock->traces[i].nr_entries, 4);
DMERR("subsequent acquisition attempted here:");
dump_stack();


@ -205,28 +205,17 @@ static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
#ifdef CONFIG_STACKTRACE
static void __save_stack_trace(struct ref_action *ra)
{
struct stack_trace stack_trace;
stack_trace.max_entries = MAX_TRACE;
stack_trace.nr_entries = 0;
stack_trace.entries = ra->trace;
stack_trace.skip = 2;
save_stack_trace(&stack_trace);
ra->trace_len = stack_trace.nr_entries;
ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2);
}
static void __print_stack_trace(struct btrfs_fs_info *fs_info,
struct ref_action *ra)
{
struct stack_trace trace;
if (ra->trace_len == 0) {
btrfs_err(fs_info, " ref-verify: no stacktrace");
return;
}
trace.nr_entries = ra->trace_len;
trace.entries = ra->trace;
print_stack_trace(&trace, 2);
stack_trace_print(ra->trace, ra->trace_len, 2);
}
#else
static void inline __save_stack_trace(struct ref_action *ra)


@ -407,7 +407,6 @@ static void unlock_trace(struct task_struct *task)
static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
struct stack_trace trace;
unsigned long *entries;
int err;
@ -430,20 +429,17 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
if (!entries)
return -ENOMEM;
trace.nr_entries = 0;
trace.max_entries = MAX_STACK_TRACE_DEPTH;
trace.entries = entries;
trace.skip = 0;
err = lock_trace(task);
if (!err) {
unsigned int i;
unsigned int i, nr_entries;
save_stack_trace_tsk(task, &trace);
nr_entries = stack_trace_save_tsk(task, entries,
MAX_STACK_TRACE_DEPTH, 0);
for (i = 0; i < trace.nr_entries; i++) {
for (i = 0; i < nr_entries; i++) {
seq_printf(m, "[<0>] %pB\n", (void *)entries[i]);
}
unlock_trace(task);
}
kfree(entries);
@ -489,10 +485,9 @@ static int lstats_show_proc(struct seq_file *m, void *v)
lr->count, lr->time, lr->max);
for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
unsigned long bt = lr->backtrace[q];
if (!bt)
break;
if (bt == ULONG_MAX)
break;
seq_printf(m, " %ps", (void *)bt);
}
seq_putc(m, '\n');


@ -241,21 +241,11 @@ static inline void ftrace_free_mem(struct module *mod, void *start, void *end) {
#ifdef CONFIG_STACK_TRACER
#define STACK_TRACE_ENTRIES 500
struct stack_trace;
extern unsigned stack_trace_index[];
extern struct stack_trace stack_trace_max;
extern unsigned long stack_trace_max_size;
extern arch_spinlock_t stack_trace_max_lock;
extern int stack_tracer_enabled;
void stack_trace_print(void);
int
stack_trace_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
int stack_trace_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);


@ -66,6 +66,11 @@ struct lock_class_key {
extern struct lock_class_key __lockdep_no_validate__;
struct lock_trace {
unsigned int nr_entries;
unsigned int offset;
};
#define LOCKSTAT_POINTS 4
/*
@ -100,7 +105,7 @@ struct lock_class {
* IRQ/softirq usage tracking bits:
*/
unsigned long usage_mask;
struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
struct lock_trace usage_traces[XXX_LOCK_USAGE_STATES];
/*
* Generation counter, when doing certain classes of graph walking,
@ -188,7 +193,7 @@ struct lock_list {
struct list_head entry;
struct lock_class *class;
struct lock_class *links_to;
struct stack_trace trace;
struct lock_trace trace;
int distance;
/*


@ -23,10 +23,10 @@
typedef u32 depot_stack_handle_t;
struct stack_trace;
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries, gfp_t gfp_flags);
depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries);
#endif


@ -3,11 +3,64 @@
#define __LINUX_STACKTRACE_H
#include <linux/types.h>
#include <asm/errno.h>
struct task_struct;
struct pt_regs;
#ifdef CONFIG_STACKTRACE
void stack_trace_print(unsigned long *trace, unsigned int nr_entries,
int spaces);
int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
unsigned int nr_entries, int spaces);
unsigned int stack_trace_save(unsigned long *store, unsigned int size,
unsigned int skipnr);
unsigned int stack_trace_save_tsk(struct task_struct *task,
unsigned long *store, unsigned int size,
unsigned int skipnr);
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
unsigned int size, unsigned int skipnr);
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
/* Internal interfaces. Do not use in generic code */
#ifdef CONFIG_ARCH_STACKWALK
/**
* stack_trace_consume_fn - Callback for arch_stack_walk()
* @cookie: Caller supplied pointer handed back by arch_stack_walk()
* @addr: The stack entry address to consume
* @reliable: True when the stack entry is reliable. Required by
* some printk based consumers.
*
* Return: True, if the entry was consumed or skipped
* False, if there is no space left to store
*/
typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr,
bool reliable);
/**
* arch_stack_walk - Architecture specific function to walk the stack
* @consume_entry: Callback which is invoked by the architecture code for
* each entry.
* @cookie: Caller supplied pointer which is handed back to
* @consume_entry
* @task: Pointer to a task struct, can be NULL
* @regs: Pointer to registers, can be NULL
*
* ============ ======= ============================================
* task regs
* ============ ======= ============================================
* task NULL Stack trace from task (can be current)
* current regs Stack trace starting on regs->stackpointer
* ============ ======= ============================================
*/
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs);
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task);
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
const struct pt_regs *regs);
#else /* CONFIG_ARCH_STACKWALK */
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
@ -21,24 +74,20 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
struct stack_trace *trace);
extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
struct stack_trace *trace);
extern void print_stack_trace(struct stack_trace *trace, int spaces);
extern int snprint_stack_trace(char *buf, size_t size,
struct stack_trace *trace, int spaces);
#ifdef CONFIG_USER_STACKTRACE_SUPPORT
extern void save_stack_trace_user(struct stack_trace *trace);
#else
# define save_stack_trace_user(trace) do { } while (0)
#endif
#else /* !CONFIG_STACKTRACE */
# define save_stack_trace(trace) do { } while (0)
# define save_stack_trace_tsk(tsk, trace) do { } while (0)
# define save_stack_trace_user(trace) do { } while (0)
# define print_stack_trace(trace, spaces) do { } while (0)
# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
# define save_stack_trace_tsk_reliable(tsk, trace) ({ -ENOSYS; })
#endif /* !CONFIG_ARCH_STACKWALK */
#endif /* CONFIG_STACKTRACE */
#if defined(CONFIG_STACKTRACE) && defined(CONFIG_HAVE_RELIABLE_STACKTRACE)
int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
unsigned int size);
#else
static inline int stack_trace_save_tsk_reliable(struct task_struct *tsk,
unsigned long *store,
unsigned int size)
{
return -ENOSYS;
}
#endif
#endif /* __LINUX_STACKTRACE_H */


@ -48,19 +48,14 @@ static void backtrace_test_irq(void)
#ifdef CONFIG_STACKTRACE
static void backtrace_test_saved(void)
{
struct stack_trace trace;
unsigned long entries[8];
unsigned int nr_entries;
pr_info("Testing a saved backtrace.\n");
pr_info("The following trace is a kernel self test and not a bug!\n");
trace.nr_entries = 0;
trace.max_entries = ARRAY_SIZE(entries);
trace.entries = entries;
trace.skip = 0;
save_stack_trace(&trace);
print_stack_trace(&trace, 0);
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
stack_trace_print(entries, nr_entries, 0);
}
#else
static void backtrace_test_saved(void)


@ -89,8 +89,8 @@ struct dma_debug_entry {
int sg_mapped_ents;
enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
struct stack_trace stacktrace;
unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
unsigned int stack_len;
unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};
@ -174,7 +174,7 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry)
#ifdef CONFIG_STACKTRACE
if (entry) {
pr_warning("Mapped at:\n");
print_stack_trace(&entry->stacktrace, 0);
stack_trace_print(entry->stack_entries, entry->stack_len, 0);
}
#endif
}
@ -704,12 +704,10 @@ static struct dma_debug_entry *dma_entry_alloc(void)
spin_unlock_irqrestore(&free_entries_lock, flags);
#ifdef CONFIG_STACKTRACE
entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
entry->stacktrace.entries = entry->st_entries;
entry->stacktrace.skip = 1;
save_stack_trace(&entry->stacktrace);
entry->stack_len = stack_trace_save(entry->stack_entries,
ARRAY_SIZE(entry->stack_entries),
1);
#endif
return entry;
}


@ -120,8 +120,8 @@ account_global_scheduler_latency(struct task_struct *tsk,
break;
}
/* 0 and ULONG_MAX entries mean end of backtrace: */
if (record == 0 || record == ULONG_MAX)
/* 0 entry marks end of backtrace: */
if (!record)
break;
}
if (same) {
@ -141,20 +141,6 @@ account_global_scheduler_latency(struct task_struct *tsk,
memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}
/*
* Iterator to store a backtrace into a latency record entry
*/
static inline void store_stacktrace(struct task_struct *tsk,
struct latency_record *lat)
{
struct stack_trace trace;
memset(&trace, 0, sizeof(trace));
trace.max_entries = LT_BACKTRACEDEPTH;
trace.entries = &lat->backtrace[0];
save_stack_trace_tsk(tsk, &trace);
}
/**
* __account_scheduler_latency - record an occurred latency
* @tsk - the task struct of the task hitting the latency
@ -191,7 +177,8 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
lat.count = 1;
lat.time = usecs;
lat.max = usecs;
store_stacktrace(tsk, &lat);
stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);
raw_spin_lock_irqsave(&latency_lock, flags);
@ -210,8 +197,8 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
break;
}
/* 0 and ULONG_MAX entries mean end of backtrace: */
if (record == 0 || record == ULONG_MAX)
/* 0 entry is end of backtrace */
if (!record)
break;
}
if (same) {
@ -252,10 +239,10 @@ static int lstats_show(struct seq_file *m, void *v)
lr->count, lr->time, lr->max);
for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
unsigned long bt = lr->backtrace[q];
if (!bt)
break;
if (bt == ULONG_MAX)
break;
seq_printf(m, " %ps", (void *)bt);
}
seq_puts(m, "\n");


@ -202,15 +202,15 @@ void klp_update_patch_state(struct task_struct *task)
* Determine whether the given stack trace includes any references to a
* to-be-patched or to-be-unpatched function.
*/
static int klp_check_stack_func(struct klp_func *func,
struct stack_trace *trace)
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
unsigned int nr_entries)
{
unsigned long func_addr, func_size, address;
struct klp_ops *ops;
int i;
for (i = 0; i < trace->nr_entries; i++) {
address = trace->entries[i];
for (i = 0; i < nr_entries; i++) {
address = entries[i];
if (klp_target_state == KLP_UNPATCHED) {
/*
@ -254,29 +254,25 @@ static int klp_check_stack_func(struct klp_func *func,
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
static unsigned long entries[MAX_STACK_ENTRIES];
struct stack_trace trace;
struct klp_object *obj;
struct klp_func *func;
int ret;
int ret, nr_entries;
trace.skip = 0;
trace.nr_entries = 0;
trace.max_entries = MAX_STACK_ENTRIES;
trace.entries = entries;
ret = save_stack_trace_tsk_reliable(task, &trace);
ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
WARN_ON_ONCE(ret == -ENOSYS);
if (ret) {
if (ret < 0) {
snprintf(err_buf, STACK_ERR_BUF_SIZE,
"%s: %s:%d has an unreliable stack\n",
__func__, task->comm, task->pid);
return ret;
}
nr_entries = ret;
klp_for_each_object(klp_transition_patch, obj) {
if (!obj->patched)
continue;
klp_for_each_func(obj, func) {
ret = klp_check_stack_func(func, &trace);
ret = klp_check_stack_func(func, entries, nr_entries);
if (ret) {
snprintf(err_buf, STACK_ERR_BUF_SIZE,
"%s: %s:%d is sleeping on function %s\n",


@ -434,29 +434,14 @@ static void print_lockdep_off(const char *bug_msg)
#endif
}
static int save_trace(struct stack_trace *trace)
static int save_trace(struct lock_trace *trace)
{
trace->nr_entries = 0;
trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
trace->entries = stack_trace + nr_stack_trace_entries;
trace->skip = 3;
save_stack_trace(trace);
/*
* Some daft arches put -1 at the end to indicate its a full trace.
*
* <rant> this is buggy anyway, since it takes a whole extra entry so a
* complete trace that maxes out the entries provided will be reported
* as incomplete, friggin useless </rant>
*/
if (trace->nr_entries != 0 &&
trace->entries[trace->nr_entries-1] == ULONG_MAX)
trace->nr_entries--;
trace->max_entries = trace->nr_entries;
unsigned long *entries = stack_trace + nr_stack_trace_entries;
unsigned int max_entries;
trace->offset = nr_stack_trace_entries;
max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
trace->nr_entries = stack_trace_save(entries, max_entries, 3);
nr_stack_trace_entries += trace->nr_entries;
if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
@ -1207,7 +1192,7 @@ static struct lock_list *alloc_list_entry(void)
static int add_lock_to_list(struct lock_class *this,
struct lock_class *links_to, struct list_head *head,
unsigned long ip, int distance,
struct stack_trace *trace)
struct lock_trace *trace)
{
struct lock_list *entry;
/*
@ -1426,6 +1411,13 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
* checking.
*/
static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
{
unsigned long *entries = stack_trace + trace->offset;
stack_trace_print(entries, trace->nr_entries, spaces);
}
/*
* Print a dependency chain entry (this is only done when a deadlock
* has been detected):
@ -1438,8 +1430,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
printk("\n-> #%u", depth);
print_lock_name(target->class);
printk(KERN_CONT ":\n");
print_stack_trace(&target->trace, 6);
print_lock_trace(&target->trace, 6);
return 0;
}
@ -1533,10 +1524,9 @@ static inline int class_equal(struct lock_list *entry, void *data)
}
static noinline int print_circular_bug(struct lock_list *this,
struct lock_list *target,
struct held_lock *check_src,
struct held_lock *check_tgt,
struct stack_trace *trace)
struct lock_list *target,
struct held_lock *check_src,
struct held_lock *check_tgt)
{
struct task_struct *curr = current;
struct lock_list *parent;
@ -1752,7 +1742,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
len += printk("%*s %s", depth, "", usage_str[bit]);
len += printk(KERN_CONT " at:\n");
print_stack_trace(class->usage_traces + bit, len);
print_lock_trace(class->usage_traces + bit, len);
}
}
printk("%*s }\n", depth, "");
@ -1777,7 +1767,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
do {
print_lock_class_header(entry->class, depth);
printk("%*s ... acquired at:\n", depth, "");
print_stack_trace(&entry->trace, 2);
print_lock_trace(&entry->trace, 2);
printk("\n");
if (depth == 0 && (entry != root)) {
@ -1890,14 +1880,14 @@ print_bad_irq_dependency(struct task_struct *curr,
print_lock_name(backwards_entry->class);
pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
print_lock_name(forwards_entry->class);
pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
pr_warn("...");
print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
pr_warn("\nother info that might help us debug this:\n\n");
print_irq_lock_scenario(backwards_entry, forwards_entry,
@ -2170,8 +2160,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
*/
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next, int distance, struct stack_trace *trace,
int (*save)(struct stack_trace *trace))
struct held_lock *next, int distance, struct lock_trace *trace)
{
struct lock_list *uninitialized_var(target_entry);
struct lock_list *entry;
@ -2209,15 +2198,15 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
this.parent = NULL;
ret = check_noncircular(&this, hlock_class(prev), &target_entry);
if (unlikely(!ret)) {
if (!trace->entries) {
if (!trace->nr_entries) {
/*
* If @save fails here, the printing might trigger
* a WARN but because of the !nr_entries it should
* not do bad things.
* If save_trace fails here, the printing might
* trigger a WARN but because of the !nr_entries it
* should not do bad things.
*/
save(trace);
save_trace(trace);
}
return print_circular_bug(&this, target_entry, next, prev, trace);
return print_circular_bug(&this, target_entry, next, prev);
}
else if (unlikely(ret < 0))
return print_bfs_bug(ret);
@ -2265,7 +2254,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
return print_bfs_bug(ret);
if (!trace->entries && !save(trace))
if (!trace->nr_entries && !save_trace(trace))
return 0;
/*
@ -2297,14 +2286,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
struct lock_trace trace = { .nr_entries = 0 };
int depth = curr->lockdep_depth;
struct held_lock *hlock;
struct stack_trace trace = {
.nr_entries = 0,
.max_entries = 0,
.entries = NULL,
.skip = 0,
};
/*
* Debugging checks.
@ -2330,7 +2314,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
* added:
*/
if (hlock->read != 2 && hlock->check) {
int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
int ret = check_prev_add(curr, hlock, next, distance,
&trace);
if (!ret)
return 0;
@ -2731,6 +2716,10 @@ static inline int validate_chain(struct task_struct *curr,
{
return 1;
}
static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
{
}
#endif
/*
@ -2827,7 +2816,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
print_lock(this);
pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
print_irqtrace_events(curr);
pr_warn("\nother info that might help us debug this:\n");


@ -5,41 +5,56 @@
*
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/
#include <linux/sched/task_stack.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
void print_stack_trace(struct stack_trace *trace, int spaces)
/**
* stack_trace_print - Print the entries in the stack trace
* @entries: Pointer to storage array
* @nr_entries: Number of entries in the storage array
* @spaces: Number of leading spaces to print
*/
void stack_trace_print(unsigned long *entries, unsigned int nr_entries,
int spaces)
{
int i;
unsigned int i;
if (WARN_ON(!trace->entries))
if (WARN_ON(!entries))
return;
for (i = 0; i < trace->nr_entries; i++)
printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
for (i = 0; i < nr_entries; i++)
printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]);
}
EXPORT_SYMBOL_GPL(print_stack_trace);
EXPORT_SYMBOL_GPL(stack_trace_print);
int snprint_stack_trace(char *buf, size_t size,
struct stack_trace *trace, int spaces)
/**
* stack_trace_snprint - Print the entries in the stack trace into a buffer
* @buf: Pointer to the print buffer
* @size: Size of the print buffer
* @entries: Pointer to storage array
* @nr_entries: Number of entries in the storage array
* @spaces: Number of leading spaces to print
*
* Return: Number of bytes printed.
*/
int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
unsigned int nr_entries, int spaces)
{
int i;
int generated;
int total = 0;
unsigned int generated, i, total = 0;
if (WARN_ON(!trace->entries))
if (WARN_ON(!entries))
return 0;
for (i = 0; i < trace->nr_entries; i++) {
for (i = 0; i < nr_entries && size; i++) {
generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
(void *)trace->entries[i]);
(void *)entries[i]);
total += generated;
/* Assume that generated isn't a negative number */
if (generated >= size) {
buf += size;
size = 0;
@ -51,7 +66,176 @@ int snprint_stack_trace(char *buf, size_t size,
return total;
}
EXPORT_SYMBOL_GPL(snprint_stack_trace);
EXPORT_SYMBOL_GPL(stack_trace_snprint);
#ifdef CONFIG_ARCH_STACKWALK
struct stacktrace_cookie {
unsigned long *store;
unsigned int size;
unsigned int skip;
unsigned int len;
};
static bool stack_trace_consume_entry(void *cookie, unsigned long addr,
bool reliable)
{
struct stacktrace_cookie *c = cookie;
if (c->len >= c->size)
return false;
if (c->skip > 0) {
c->skip--;
return true;
}
c->store[c->len++] = addr;
return c->len < c->size;
}
static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr,
bool reliable)
{
if (in_sched_functions(addr))
return true;
return stack_trace_consume_entry(cookie, addr, reliable);
}
/**
* stack_trace_save - Save a stack trace into a storage array
* @store: Pointer to storage array
* @size: Size of the storage array
* @skipnr: Number of entries to skip at the start of the stack trace
*
* Return: Number of trace entries stored.
*/
unsigned int stack_trace_save(unsigned long *store, unsigned int size,
unsigned int skipnr)
{
stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
struct stacktrace_cookie c = {
.store = store,
.size = size,
.skip = skipnr + 1,
};
arch_stack_walk(consume_entry, &c, current, NULL);
return c.len;
}
EXPORT_SYMBOL_GPL(stack_trace_save);
/**
* stack_trace_save_tsk - Save a task stack trace into a storage array
* @task: The task to examine
* @store: Pointer to storage array
* @size: Size of the storage array
* @skipnr: Number of entries to skip at the start of the stack trace
*
* Return: Number of trace entries stored.
*/
unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
unsigned int size, unsigned int skipnr)
{
stack_trace_consume_fn consume_entry = stack_trace_consume_entry_nosched;
struct stacktrace_cookie c = {
.store = store,
.size = size,
.skip = skipnr + 1,
};
if (!try_get_task_stack(tsk))
return 0;
arch_stack_walk(consume_entry, &c, tsk, NULL);
put_task_stack(tsk);
return c.len;
}
/**
* stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
* @regs: Pointer to pt_regs to examine
* @store: Pointer to storage array
* @size: Size of the storage array
* @skipnr: Number of entries to skip at the start of the stack trace
*
* Return: Number of trace entries stored.
*/
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
unsigned int size, unsigned int skipnr)
{
stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
struct stacktrace_cookie c = {
.store = store,
.size = size,
.skip = skipnr,
};
arch_stack_walk(consume_entry, &c, current, regs);
return c.len;
}
#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/**
* stack_trace_save_tsk_reliable - Save task stack with verification
* @tsk: Pointer to the task to examine
* @store: Pointer to storage array
* @size: Size of the storage array
*
* Return: An error if it detects any unreliable features of the
* stack. Otherwise it guarantees that the stack trace is
* reliable and returns the number of entries stored.
*
* If the task is not 'current', the caller *must* ensure the task is inactive.
*/
int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
unsigned int size)
{
stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
struct stacktrace_cookie c = {
.store = store,
.size = size,
};
int ret;
/*
* If the task doesn't have a stack (e.g., a zombie), the stack is
* "reliably" empty.
*/
if (!try_get_task_stack(tsk))
return 0;
ret = arch_stack_walk_reliable(consume_entry, &c, tsk);
put_task_stack(tsk);
return ret;
}
#endif
#ifdef CONFIG_USER_STACKTRACE_SUPPORT
/**
* stack_trace_save_user - Save a user space stack trace into a storage array
* @store: Pointer to storage array
* @size: Size of the storage array
*
* Return: Number of trace entries stored.
*/
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
{
stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
struct stacktrace_cookie c = {
.store = store,
.size = size,
};
/* Trace user stack if not a kernel thread */
if (!current->mm)
return 0;
arch_stack_walk_user(consume_entry, &c, task_pt_regs(current));
return c.len;
}
#endif
#else /* CONFIG_ARCH_STACKWALK */
/*
* Architectures that do not implement save_stack_trace_*()
@ -77,3 +261,118 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk,
WARN_ONCE(1, KERN_INFO "save_stack_tsk_reliable() not implemented yet.\n");
return -ENOSYS;
}
/**
* stack_trace_save - Save a stack trace into a storage array
* @store: Pointer to storage array
* @size: Size of the storage array
* @skipnr: Number of entries to skip at the start of the stack trace
*
* Return: Number of trace entries stored
*/
unsigned int stack_trace_save(unsigned long *store, unsigned int size,
unsigned int skipnr)
{
struct stack_trace trace = {
.entries = store,
.max_entries = size,
.skip = skipnr + 1,
};
save_stack_trace(&trace);
return trace.nr_entries;
}
EXPORT_SYMBOL_GPL(stack_trace_save);
/**
* stack_trace_save_tsk - Save a task stack trace into a storage array
* @task: The task to examine
* @store: Pointer to storage array
* @size: Size of the storage array
* @skipnr: Number of entries to skip at the start of the stack trace
*
* Return: Number of trace entries stored
*/
unsigned int stack_trace_save_tsk(struct task_struct *task,
unsigned long *store, unsigned int size,
unsigned int skipnr)
{
struct stack_trace trace = {
.entries = store,
.max_entries = size,
.skip = skipnr + 1,
};
save_stack_trace_tsk(task, &trace);
return trace.nr_entries;
}
/**
* stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
* @regs: Pointer to pt_regs to examine
* @store: Pointer to storage array
* @size: Size of the storage array
* @skipnr: Number of entries to skip at the start of the stack trace
*
* Return: Number of trace entries stored
*/
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
unsigned int size, unsigned int skipnr)
{
struct stack_trace trace = {
.entries = store,
.max_entries = size,
.skip = skipnr,
};
save_stack_trace_regs(regs, &trace);
return trace.nr_entries;
}
#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/**
* stack_trace_save_tsk_reliable - Save task stack with verification
* @tsk: Pointer to the task to examine
* @store: Pointer to storage array
* @size: Size of the storage array
*
* Return: An error if it detects any unreliable features of the
* stack. Otherwise it guarantees that the stack trace is
* reliable and returns the number of entries stored.
*
* If the task is not 'current', the caller *must* ensure the task is inactive.
*/
int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
unsigned int size)
{
struct stack_trace trace = {
.entries = store,
.max_entries = size,
};
int ret = save_stack_trace_tsk_reliable(tsk, &trace);
return ret ? ret : trace.nr_entries;
}
#endif
#ifdef CONFIG_USER_STACKTRACE_SUPPORT
/**
* stack_trace_save_user - Save a user space stack trace into a storage array
* @store: Pointer to storage array
* @size: Size of the storage array
*
* Return: Number of trace entries stored
*/
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
{
struct stack_trace trace = {
.entries = store,
.max_entries = size,
};
save_stack_trace_user(&trace);
return trace.nr_entries;
}
#endif /* CONFIG_USER_STACKTRACE_SUPPORT */
#endif /* !CONFIG_ARCH_STACKWALK */


@ -159,6 +159,8 @@ static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
static int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct ring_buffer *buffer,
unsigned long flags, int pc);
#define MAX_TRACER_SIZE 100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
@ -2752,12 +2754,21 @@ trace_function(struct trace_array *tr,
#ifdef CONFIG_STACKTRACE
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
#define FTRACE_KSTACK_NESTING 4
#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
struct ftrace_stack {
unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
unsigned long calls[FTRACE_KSTACK_ENTRIES];
};
static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
struct ftrace_stacks {
struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
};
static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
static void __ftrace_trace_stack(struct ring_buffer *buffer,
@ -2766,13 +2777,10 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
{
struct trace_event_call *call = &event_kernel_stack;
struct ring_buffer_event *event;
unsigned int size, nr_entries;
struct ftrace_stack *fstack;
struct stack_entry *entry;
struct stack_trace trace;
int use_stack;
int size = FTRACE_STACK_ENTRIES;
trace.nr_entries = 0;
trace.skip = skip;
int stackidx;
/*
* Add one, for this function and the call to save_stack_trace()
@ -2780,7 +2788,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
*/
#ifndef CONFIG_UNWINDER_ORC
if (!regs)
trace.skip++;
skip++;
#endif
/*
@ -2791,53 +2799,40 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
*/
preempt_disable_notrace();
use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
/* This should never happen. If it does, yell once and skip */
if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING))
goto out;
/*
* We don't need any atomic variables, just a barrier.
* If an interrupt comes in, we don't care, because it would
* have exited and put the counter back to what we want.
* We just need a barrier to keep gcc from moving things
* around.
* The above __this_cpu_inc_return() is 'atomic' cpu local. An
* interrupt will either see the value pre increment or post
* increment. If the interrupt happens pre increment it will have
* restored the counter when it returns. We just need a barrier to
* keep gcc from moving things around.
*/
barrier();
if (use_stack == 1) {
trace.entries = this_cpu_ptr(ftrace_stack.calls);
trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
if (regs)
save_stack_trace_regs(regs, &trace);
else
save_stack_trace(&trace);
fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
size = ARRAY_SIZE(fstack->calls);
if (trace.nr_entries > size)
size = trace.nr_entries;
} else
/* From now on, use_stack is a boolean */
use_stack = 0;
size *= sizeof(unsigned long);
if (regs) {
nr_entries = stack_trace_save_regs(regs, fstack->calls,
size, skip);
} else {
nr_entries = stack_trace_save(fstack->calls, size, skip);
}
size = nr_entries * sizeof(unsigned long);
event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
sizeof(*entry) + size, flags, pc);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
memset(&entry->caller, 0, size);
if (use_stack)
memcpy(&entry->caller, trace.entries,
trace.nr_entries * sizeof(unsigned long));
else {
trace.max_entries = FTRACE_STACK_ENTRIES;
trace.entries = entry->caller;
if (regs)
save_stack_trace_regs(regs, &trace);
else
save_stack_trace(&trace);
}
entry->size = trace.nr_entries;
memcpy(&entry->caller, fstack->calls, size);
entry->size = nr_entries;
if (!call_filter_check_discard(call, entry, buffer, event))
__buffer_unlock_commit(buffer, event);
@ -2907,15 +2902,15 @@ void trace_dump_stack(int skip)
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
#ifdef CONFIG_USER_STACKTRACE_SUPPORT
static DEFINE_PER_CPU(int, user_stack_count);
void
static void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
struct trace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
struct userstack_entry *entry;
struct stack_trace trace;
if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
@ -2946,12 +2941,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
entry->tgid = current->tgid;
memset(&entry->caller, 0, sizeof(entry->caller));
trace.nr_entries = 0;
trace.max_entries = FTRACE_STACK_ENTRIES;
trace.skip = 0;
trace.entries = entry->caller;
save_stack_trace_user(&trace);
stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
if (!call_filter_check_discard(call, entry, buffer, event))
__buffer_unlock_commit(buffer, event);
@ -2960,13 +2950,12 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
out:
preempt_enable();
}
#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
static void ftrace_trace_userstack(struct ring_buffer *buffer,
unsigned long flags, int pc)
{
ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */
#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
#endif /* CONFIG_STACKTRACE */


@ -782,17 +782,9 @@ void update_max_tr_single(struct trace_array *tr,
#endif /* CONFIG_TRACER_MAX_TRACE */
#ifdef CONFIG_STACKTRACE
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
int pc);
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc);
#else
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
unsigned long flags, int pc)
{
}
static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
int skip, int pc)
{


@ -5186,7 +5186,6 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
u64 var_ref_vals[TRACING_MAP_VARS_MAX];
char compound_key[HIST_KEY_SIZE_MAX];
struct tracing_map_elt *elt = NULL;
struct stack_trace stacktrace;
struct hist_field *key_field;
u64 field_contents;
void *key = NULL;
@ -5198,14 +5197,9 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
key_field = hist_data->fields[i];
if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
stacktrace.entries = entries;
stacktrace.nr_entries = 0;
stacktrace.skip = HIST_STACKTRACE_SKIP;
memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
save_stack_trace(&stacktrace);
memset(entries, 0, HIST_STACKTRACE_SIZE);
stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
HIST_STACKTRACE_SKIP);
key = entries;
} else {
field_contents = key_field->fn(key_field, elt, rbe, rec);
@ -5246,7 +5240,7 @@ static void hist_trigger_stacktrace_print(struct seq_file *m,
unsigned int i;
for (i = 0; i < max_entries; i++) {
if (stacktrace_entries[i] == ULONG_MAX)
if (!stacktrace_entries[i])
return;
seq_printf(m, "%*c", 1 + spaces, ' ');


@ -18,44 +18,32 @@
#include "trace.h"
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
{ [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];
#define STACK_TRACE_ENTRIES 500
/*
* Reserve one entry for the passed in ip. This will allow
* us to remove most or all of the stack size overhead
* added by the stack tracer itself.
*/
struct stack_trace stack_trace_max = {
.max_entries = STACK_TRACE_ENTRIES - 1,
.entries = &stack_dump_trace[0],
};
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];
unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
static unsigned int stack_trace_nr_entries;
static unsigned long stack_trace_max_size;
static arch_spinlock_t stack_trace_max_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);
int stack_tracer_enabled;
static int last_stack_tracer_enabled;
void stack_trace_print(void)
static void print_max_stack(void)
{
long i;
int size;
pr_emerg(" Depth Size Location (%d entries)\n"
" ----- ---- --------\n",
stack_trace_max.nr_entries);
stack_trace_nr_entries);
for (i = 0; i < stack_trace_max.nr_entries; i++) {
if (stack_dump_trace[i] == ULONG_MAX)
break;
if (i+1 == stack_trace_max.nr_entries ||
stack_dump_trace[i+1] == ULONG_MAX)
for (i = 0; i < stack_trace_nr_entries; i++) {
if (i + 1 == stack_trace_nr_entries)
size = stack_trace_index[i];
else
size = stack_trace_index[i] - stack_trace_index[i+1];
@ -65,16 +53,7 @@ void stack_trace_print(void)
}
}
/*
* When arch-specific code overrides this function, the following
* data should be filled up, assuming stack_trace_max_lock is held to
* prevent concurrent updates.
* stack_trace_index[]
* stack_trace_max
* stack_trace_max_size
*/
void __weak
check_stack(unsigned long ip, unsigned long *stack)
static void check_stack(unsigned long ip, unsigned long *stack)
{
unsigned long this_size, flags; unsigned long *p, *top, *start;
static int tracer_frame;
@ -110,13 +89,12 @@ check_stack(unsigned long ip, unsigned long *stack)
stack_trace_max_size = this_size;
stack_trace_max.nr_entries = 0;
stack_trace_max.skip = 0;
save_stack_trace(&stack_trace_max);
stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
ARRAY_SIZE(stack_dump_trace) - 1,
0);
/* Skip over the overhead of the stack tracer itself */
for (i = 0; i < stack_trace_max.nr_entries; i++) {
for (i = 0; i < stack_trace_nr_entries; i++) {
if (stack_dump_trace[i] == ip)
break;
}
@ -125,7 +103,7 @@ check_stack(unsigned long ip, unsigned long *stack)
* Some archs may not have the passed in ip in the dump.
* If that happens, we need to show everything.
*/
if (i == stack_trace_max.nr_entries)
if (i == stack_trace_nr_entries)
i = 0;
/*
@ -143,15 +121,13 @@ check_stack(unsigned long ip, unsigned long *stack)
* loop will only happen once. This code only takes place
* on a new max, so it is far from a fast path.
*/
while (i < stack_trace_max.nr_entries) {
while (i < stack_trace_nr_entries) {
int found = 0;
stack_trace_index[x] = this_size;
p = start;
for (; p < top && i < stack_trace_max.nr_entries; p++) {
if (stack_dump_trace[i] == ULONG_MAX)
break;
for (; p < top && i < stack_trace_nr_entries; p++) {
/*
* The READ_ONCE_NOCHECK is used to let KASAN know that
* this is not a stack-out-of-bounds error.
@ -182,12 +158,10 @@ check_stack(unsigned long ip, unsigned long *stack)
i++;
}
stack_trace_max.nr_entries = x;
for (; x < i; x++)
stack_dump_trace[x] = ULONG_MAX;
stack_trace_nr_entries = x;
if (task_stack_end_corrupted(current)) {
stack_trace_print();
print_max_stack();
BUG();
}
@ -286,7 +260,7 @@ __next(struct seq_file *m, loff_t *pos)
{
long n = *pos - 1;
if (n >= stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
if (n >= stack_trace_nr_entries)
return NULL;
m->private = (void *)n;
@ -350,7 +324,7 @@ static int t_show(struct seq_file *m, void *v)
seq_printf(m, " Depth Size Location"
" (%d entries)\n"
" ----- ---- --------\n",
stack_trace_max.nr_entries);
stack_trace_nr_entries);
if (!stack_tracer_enabled && !stack_trace_max_size)
print_disabled(m);
@ -360,12 +334,10 @@ static int t_show(struct seq_file *m, void *v)
i = *(long *)v;
if (i >= stack_trace_max.nr_entries ||
stack_dump_trace[i] == ULONG_MAX)
if (i >= stack_trace_nr_entries)
return 0;
if (i+1 == stack_trace_max.nr_entries ||
stack_dump_trace[i+1] == ULONG_MAX)
if (i + 1 == stack_trace_nr_entries)
size = stack_trace_index[i];
else
size = stack_trace_index[i] - stack_trace_index[i+1];
@ -422,23 +394,21 @@ stack_trace_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int was_enabled;
int ret;
mutex_lock(&stack_sysctl_mutex);
was_enabled = !!stack_tracer_enabled;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret || !write ||
(last_stack_tracer_enabled == !!stack_tracer_enabled))
if (ret || !write || (was_enabled == !!stack_tracer_enabled))
goto out;
last_stack_tracer_enabled = !!stack_tracer_enabled;
if (stack_tracer_enabled)
register_ftrace_function(&trace_ops);
else
unregister_ftrace_function(&trace_ops);
out:
mutex_unlock(&stack_sysctl_mutex);
return ret;
@ -454,7 +424,6 @@ static __init int enable_stacktrace(char *str)
strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);
stack_tracer_enabled = 1;
last_stack_tracer_enabled = 1;
return 1;
}
__setup("stacktrace", enable_stacktrace);

@ -597,6 +597,10 @@ config ARCH_HAS_UACCESS_FLUSHCACHE
config ARCH_HAS_UACCESS_MCSAFE
bool
# Temporary. Goes away when all archs are cleaned up
config ARCH_STACKWALK
bool
config STACKDEPOT
bool
select STACKTRACE

@ -65,22 +65,16 @@ static bool fail_task(struct fault_attr *attr, struct task_struct *task)
static bool fail_stacktrace(struct fault_attr *attr)
{
struct stack_trace trace;
int depth = attr->stacktrace_depth;
unsigned long entries[MAX_STACK_TRACE_DEPTH];
int n;
int n, nr_entries;
bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX);
if (depth == 0)
return found;
trace.nr_entries = 0;
trace.entries = entries;
trace.max_entries = depth;
trace.skip = 1;
save_stack_trace(&trace);
for (n = 0; n < trace.nr_entries; n++) {
nr_entries = stack_trace_save(entries, depth, 1);
for (n = 0; n < nr_entries; n++) {
if (attr->reject_start <= entries[n] &&
entries[n] < attr->reject_end)
return false;

@ -194,40 +194,52 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
return NULL;
}
void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
/**
* stack_depot_fetch - Fetch stack entries from a depot
*
* @handle: Stack depot handle which was returned from
* stack_depot_save().
* @entries: Pointer to store the entries address
*
* Return: The number of trace entries for this depot.
*/
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
union handle_parts parts = { .handle = handle };
void *slab = stack_slabs[parts.slabindex];
size_t offset = parts.offset << STACK_ALLOC_ALIGN;
struct stack_record *stack = slab + offset;
trace->nr_entries = trace->max_entries = stack->size;
trace->entries = stack->entries;
trace->skip = 0;
*entries = stack->entries;
return stack->size;
}
EXPORT_SYMBOL_GPL(depot_fetch_stack);
EXPORT_SYMBOL_GPL(stack_depot_fetch);
/**
* depot_save_stack - save stack in a stack depot.
* @trace - the stacktrace to save.
* @alloc_flags - flags for allocating additional memory if required.
* stack_depot_save - Save a stack trace from an array
*
* Returns the handle of the stack struct stored in depot.
* @entries: Pointer to storage array
* @nr_entries: Size of the storage array
* @alloc_flags: Allocation gfp flags
*
* Return: The handle of the stack struct stored in depot
*/
depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
gfp_t alloc_flags)
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags)
{
u32 hash;
depot_stack_handle_t retval = 0;
struct stack_record *found = NULL, **bucket;
unsigned long flags;
depot_stack_handle_t retval = 0;
struct page *page = NULL;
void *prealloc = NULL;
unsigned long flags;
u32 hash;
if (unlikely(trace->nr_entries == 0))
if (unlikely(nr_entries == 0))
goto fast_exit;
hash = hash_stack(trace->entries, trace->nr_entries);
hash = hash_stack(entries, nr_entries);
bucket = &stack_table[hash & STACK_HASH_MASK];
/*
@ -235,8 +247,8 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
* The smp_load_acquire() here pairs with smp_store_release() to
* |bucket| below.
*/
found = find_stack(smp_load_acquire(bucket), trace->entries,
trace->nr_entries, hash);
found = find_stack(smp_load_acquire(bucket), entries,
nr_entries, hash);
if (found)
goto exit;
@ -264,10 +276,10 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
spin_lock_irqsave(&depot_lock, flags);
found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
found = find_stack(*bucket, entries, nr_entries, hash);
if (!found) {
struct stack_record *new =
depot_alloc_stack(trace->entries, trace->nr_entries,
depot_alloc_stack(entries, nr_entries,
hash, &prealloc, alloc_flags);
if (new) {
new->next = *bucket;
@ -297,4 +309,4 @@ exit:
fast_exit:
return retval;
}
EXPORT_SYMBOL_GPL(depot_save_stack);
EXPORT_SYMBOL_GPL(stack_depot_save);
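
A minimal sketch of the saving side of the depot API documented above (kernel context assumed; example_save_to_depot() is an illustrative name):

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t example_save_to_depot(gfp_t flags)
{
	unsigned long entries[32];
	unsigned int nr_entries;

	/* Capture into an on-stack array; the return value is the count. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/*
	 * stack_depot_save() hashes the entries, stores the trace only
	 * once and hands back a compact handle (0 if nr_entries is 0 or
	 * storage could not be allocated).
	 */
	return stack_depot_save(entries, nr_entries, flags);
}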

@ -49,37 +49,28 @@ static inline int in_irqentry_text(unsigned long ptr)
ptr < (unsigned long)&__softirqentry_text_end);
}
static inline void filter_irq_stacks(struct stack_trace *trace)
static inline unsigned int filter_irq_stacks(unsigned long *entries,
unsigned int nr_entries)
{
int i;
unsigned int i;
if (!trace->nr_entries)
return;
for (i = 0; i < trace->nr_entries; i++)
if (in_irqentry_text(trace->entries[i])) {
for (i = 0; i < nr_entries; i++) {
if (in_irqentry_text(entries[i])) {
/* Include the irqentry function into the stack. */
trace->nr_entries = i + 1;
break;
return i + 1;
}
}
return nr_entries;
}
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
unsigned long entries[KASAN_STACK_DEPTH];
struct stack_trace trace = {
.nr_entries = 0,
.entries = entries,
.max_entries = KASAN_STACK_DEPTH,
.skip = 0
};
unsigned int nr_entries;
save_stack_trace(&trace);
filter_irq_stacks(&trace);
if (trace.nr_entries != 0 &&
trace.entries[trace.nr_entries-1] == ULONG_MAX)
trace.nr_entries--;
return depot_save_stack(&trace, flags);
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
nr_entries = filter_irq_stacks(entries, nr_entries);
return stack_depot_save(entries, nr_entries, flags);
}
static inline void set_track(struct kasan_track *track, gfp_t flags)

@ -100,10 +100,11 @@ static void print_track(struct kasan_track *track, const char *prefix)
{
pr_err("%s by task %u:\n", prefix, track->pid);
if (track->stack) {
struct stack_trace trace;
unsigned long *entries;
unsigned int nr_entries;
depot_fetch_stack(track->stack, &trace);
print_stack_trace(&trace, 0);
nr_entries = stack_depot_fetch(track->stack, &entries);
stack_trace_print(entries, nr_entries, 0);
} else {
pr_err("(stack is not available)\n");
}
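
The consuming side, as in the report path above, reverses this with stack_depot_fetch() plus the new printing helper. A minimal sketch (kernel context assumed; example_print_handle() is an illustrative name):

#include <linux/printk.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static void example_print_handle(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries;

	if (!handle) {
		pr_err("(stack is not available)\n");
		return;
	}

	/* The depot owns the storage; callers get a pointer and a count. */
	nr_entries = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr_entries, 0);
}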

@ -410,11 +410,6 @@ static void print_unreferenced(struct seq_file *seq,
*/
static void dump_object_info(struct kmemleak_object *object)
{
struct stack_trace trace;
trace.nr_entries = object->trace_len;
trace.entries = object->trace;
pr_notice("Object 0x%08lx (size %zu):\n",
object->pointer, object->size);
pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
@ -424,7 +419,7 @@ static void dump_object_info(struct kmemleak_object *object)
pr_notice(" flags = 0x%x\n", object->flags);
pr_notice(" checksum = %u\n", object->checksum);
pr_notice(" backtrace:\n");
print_stack_trace(&trace, 4);
stack_trace_print(object->trace, object->trace_len, 4);
}
/*
@ -553,15 +548,7 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
*/
static int __save_stack_trace(unsigned long *trace)
{
struct stack_trace stack_trace;
stack_trace.max_entries = MAX_TRACE;
stack_trace.nr_entries = 0;
stack_trace.entries = trace;
stack_trace.skip = 2;
save_stack_trace(&stack_trace);
return stack_trace.nr_entries;
return stack_trace_save(trace, MAX_TRACE, 2);
}
/*
@ -2021,13 +2008,8 @@ early_param("kmemleak", kmemleak_boot_config);
static void __init print_log_trace(struct early_log *log)
{
struct stack_trace trace;
trace.nr_entries = log->trace_len;
trace.entries = log->trace;
pr_notice("Early log backtrace:\n");
print_stack_trace(&trace, 2);
stack_trace_print(log->trace, log->trace_len, 2);
}
/*

@ -58,15 +58,10 @@ static bool need_page_owner(void)
static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
unsigned long entries[4];
struct stack_trace dummy;
unsigned int nr_entries;
dummy.nr_entries = 0;
dummy.max_entries = ARRAY_SIZE(entries);
dummy.entries = &entries[0];
dummy.skip = 0;
save_stack_trace(&dummy);
return depot_save_stack(&dummy, GFP_KERNEL);
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}
static noinline void register_dummy_stack(void)
@ -120,49 +115,39 @@ void __reset_page_owner(struct page *page, unsigned int order)
}
}
static inline bool check_recursive_alloc(struct stack_trace *trace,
unsigned long ip)
static inline bool check_recursive_alloc(unsigned long *entries,
unsigned int nr_entries,
unsigned long ip)
{
int i;
unsigned int i;
if (!trace->nr_entries)
return false;
for (i = 0; i < trace->nr_entries; i++) {
if (trace->entries[i] == ip)
for (i = 0; i < nr_entries; i++) {
if (entries[i] == ip)
return true;
}
return false;
}
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
unsigned long entries[PAGE_OWNER_STACK_DEPTH];
struct stack_trace trace = {
.nr_entries = 0,
.entries = entries,
.max_entries = PAGE_OWNER_STACK_DEPTH,
.skip = 2
};
depot_stack_handle_t handle;
unsigned int nr_entries;
save_stack_trace(&trace);
if (trace.nr_entries != 0 &&
trace.entries[trace.nr_entries-1] == ULONG_MAX)
trace.nr_entries--;
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
/*
* We need to check recursion here because our request to stackdepot
* could trigger memory allocation to save new entry. New memory
* allocation would reach here and call depot_save_stack() again
* if we don't catch it. There is still not enough memory in stackdepot
* so it would try to allocate memory again and loop forever.
* We need to check recursion here because our request to
* stackdepot could trigger memory allocation to save new
* entry. New memory allocation would reach here and call
* stack_depot_save_entries() again if we don't catch it. There is
* still not enough memory in stackdepot so it would try to
* allocate memory again and loop forever.
*/
if (check_recursive_alloc(&trace, _RET_IP_))
if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
return dummy_handle;
handle = depot_save_stack(&trace, flags);
handle = stack_depot_save(entries, nr_entries, flags);
if (!handle)
handle = failure_handle;
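
A minimal sketch of the guard described in the comment above, combining the capture, the re-entry check and the fallback handles (kernel context assumed; example_save_stack() and its handle parameters are illustrative, the real code uses the file-local dummy_handle and failure_handle):

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t example_save_stack(gfp_t flags,
					       depot_stack_handle_t dummy,
					       depot_stack_handle_t failure)
{
	unsigned long entries[16];
	unsigned int i, nr_entries;
	depot_stack_handle_t handle;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);

	/*
	 * If our own return address shows up in the trace, the depot's
	 * allocation has re-entered this path: return the pre-registered
	 * dummy handle instead of looping forever.
	 */
	for (i = 0; i < nr_entries; i++) {
		if (entries[i] == _RET_IP_)
			return dummy;
	}

	handle = stack_depot_save(entries, nr_entries, flags);
	return handle ? handle : failure;
}
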
@ -340,16 +325,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
struct page *page, struct page_owner *page_owner,
depot_stack_handle_t handle)
{
int ret;
int pageblock_mt, page_mt;
int ret, pageblock_mt, page_mt;
unsigned long *entries;
unsigned int nr_entries;
char *kbuf;
unsigned long entries[PAGE_OWNER_STACK_DEPTH];
struct stack_trace trace = {
.nr_entries = 0,
.entries = entries,
.max_entries = PAGE_OWNER_STACK_DEPTH,
.skip = 0
};
count = min_t(size_t, count, PAGE_SIZE);
kbuf = kmalloc(count, GFP_KERNEL);
@ -378,8 +357,8 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
if (ret >= count)
goto err;
depot_fetch_stack(handle, &trace);
ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
nr_entries = stack_depot_fetch(handle, &entries);
ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
if (ret >= count)
goto err;
@ -410,14 +389,9 @@ void __dump_page_owner(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_owner *page_owner;
unsigned long entries[PAGE_OWNER_STACK_DEPTH];
struct stack_trace trace = {
.nr_entries = 0,
.entries = entries,
.max_entries = PAGE_OWNER_STACK_DEPTH,
.skip = 0
};
depot_stack_handle_t handle;
unsigned long *entries;
unsigned int nr_entries;
gfp_t gfp_mask;
int mt;
@ -441,10 +415,10 @@ void __dump_page_owner(struct page *page)
return;
}
depot_fetch_stack(handle, &trace);
nr_entries = stack_depot_fetch(handle, &entries);
pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
print_stack_trace(&trace, 0);
stack_trace_print(entries, nr_entries, 0);
if (page_owner->last_migrate_reason != -1)
pr_alert("page has been migrated, last migrate reason: %s\n",

@ -552,31 +552,22 @@ static void set_track(struct kmem_cache *s, void *object,
if (addr) {
#ifdef CONFIG_STACKTRACE
struct stack_trace trace;
int i;
unsigned int nr_entries;
trace.nr_entries = 0;
trace.max_entries = TRACK_ADDRS_COUNT;
trace.entries = p->addrs;
trace.skip = 3;
metadata_access_enable();
save_stack_trace(&trace);
nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
metadata_access_disable();
/* See rant in lockdep.c */
if (trace.nr_entries != 0 &&
trace.entries[trace.nr_entries - 1] == ULONG_MAX)
trace.nr_entries--;
for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
p->addrs[i] = 0;
if (nr_entries < TRACK_ADDRS_COUNT)
p->addrs[nr_entries] = 0;
#endif
p->addr = addr;
p->cpu = smp_processor_id();
p->pid = current->pid;
p->when = jiffies;
} else
} else {
memset(p, 0, sizeof(struct track));
}
}
static void init_tracking(struct kmem_cache *s, void *object)