/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>

/*
 * Warnings and stack-boundary notifications from dump_trace() are
 * deliberately ignored; only the address callbacks below record entries.
 */
static void save_stack_warning(void *data, char *msg)
{
}

static void
save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
{
}

static int save_stack_stack(void *data, char *name)
{
	return -1;
}

static void save_stack_address(void *data, unsigned long addr, int reliable)
{
	struct stack_trace *trace = data;

	/* Record only addresses the unwinder considers reliable. */
	if (!reliable)
		return;
	if (trace->skip > 0) {
		trace->skip--;
		return;
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	struct stack_trace *trace = (struct stack_trace *)data;

	if (!reliable)
		return;
	/* Skip scheduler internals so the trace shows where a task sleeps. */
	if (in_sched_functions(addr))
		return;
	if (trace->skip > 0) {
		trace->skip--;
		return;
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static const struct stacktrace_ops save_stack_ops = {
	.warning	= save_stack_warning,
	.warning_symbol	= save_stack_warning_symbol,
	.stack		= save_stack_stack,
	.address	= save_stack_address,
};

static const struct stacktrace_ops save_stack_ops_nosched = {
	.warning	= save_stack_warning,
	.warning_symbol	= save_stack_warning_symbol,
	.stack		= save_stack_stack,
	.address	= save_stack_address_nosched,
};

/*
 * Save stack-backtrace addresses into a stack_trace buffer;
 * the trace is terminated with ULONG_MAX when there is room.
 */
void save_stack_trace(struct stack_trace *trace)
{
	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
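
/*
 * Minimal usage sketch (illustrative only; 'entries' and NR_ENTRIES are
 * hypothetical names, not part of this file):
 *
 *	static unsigned long entries[NR_ENTRIES];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= NR_ENTRIES,
 *		.skip		= 1,	(e.g. drop the immediate caller)
 *	};
 *
 *	save_stack_trace(&trace);
 *
 * Afterwards entries[0..trace.nr_entries-1] holds the return addresses,
 * terminated by ULONG_MAX if there was room left.
 */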

/*
 * Same as above for an arbitrary task, filtering out scheduler
 * functions so a sleeping task's trace points at the real sleep site.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
|
2008-11-22 19:28:47 +08:00
|
|
|
|
|
|
|
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
|
|
|
|
|
|
|
|
struct stack_frame {
|
|
|
|
const void __user *next_fp;
|
|
|
|
unsigned long return_address;
|
|
|
|
};
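
/*
 * With frame pointers enabled, a user-space stack frame begins with the
 * caller's saved frame pointer followed by the return address, so this
 * struct maps directly onto the memory at the user's frame pointer:
 *
 *	fp -> +----------------+
 *	      | next_fp        |   caller's frame pointer
 *	      +----------------+
 *	      | return_address |   where the function returns to
 *	      +----------------+
 */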

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	/*
	 * Disable page faults: we may be called from contexts where
	 * faulting in user pages is not allowed, so a fault simply
	 * makes the copy fail instead of sleeping.
	 */
	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

/*
 * Walk the frame-pointer chain of current's user stack and record the
 * return addresses, terminated by ULONG_MAX when there is room.
 */
void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace the user stack only if this is not a kernel thread
	 * (kernel threads have no mm and hence no user stack).
	 */
	if (current->mm) {
		const struct pt_regs *regs = task_pt_regs(current);
		const void __user *fp = (const void __user *)regs->bp;

		if (trace->nr_entries < trace->max_entries)
			trace->entries[trace->nr_entries++] = regs->ip;

		while (trace->nr_entries < trace->max_entries) {
			struct stack_frame frame;

			frame.next_fp = NULL;
			frame.return_address = 0;
			if (!copy_stack_frame(fp, &frame))
				break;
			/* Frames must not sit below the stack pointer. */
			if ((unsigned long)fp < regs->sp)
				break;
			if (frame.return_address)
				trace->entries[trace->nr_entries++] =
					frame.return_address;
			/* A self-referencing frame pointer would loop forever. */
			if (fp == frame.next_fp)
				break;
			fp = frame.next_fp;
		}
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
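
/*
 * Note: save_stack_trace_user() inspects current's registers and stack,
 * so it only makes sense when invoked in the context of the task being
 * traced (e.g. from a profiling interrupt that hit user mode).
 */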