Merge branch 'mainline/function-graph' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/function-graph-tracer
commit 4cd0332db7
@@ -55,29 +55,4 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-#ifndef __ASSEMBLY__
-
-/*
- * Stack of return addresses for functions
- * of a thread.
- * Used in struct thread_info
- */
-struct ftrace_ret_stack {
-	unsigned long ret;
-	unsigned long func;
-	unsigned long long calltime;
-};
-
-/*
- * Primary handler of a function return.
- * It relies on ftrace_return_to_handler.
- * Defined in entry_32/64.S
- */
-extern void return_to_handler(void);
-
-#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
 #endif /* _ASM_X86_FTRACE_H */
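The block removed here is the per-thread shadow return stack: each traced function entry pushes the real return address, the function's own address, and an entry timestamp, and each return pops the record back in LIFO order (the same definitions reappear in a generic header in a later hunk below). A minimal userspace sketch of that idea, assuming a fixed depth; shadow_push, shadow_pop, and DEPTH are illustrative names, not kernel API:

#include <stdio.h>

/* Mirrors the three fields of struct ftrace_ret_stack shown above. */
struct ret_stack_entry {
	unsigned long ret;           /* saved real return address */
	unsigned long func;          /* traced function's address */
	unsigned long long calltime; /* timestamp taken at entry */
};

#define DEPTH 4
static struct ret_stack_entry stack[DEPTH];
static int curr = -1; /* like current->curr_ret_stack: -1 means empty */

/* Push at function entry; -1 signals overflow (the kernel returns -EBUSY). */
static int shadow_push(unsigned long ret, unsigned long func,
		       unsigned long long time)
{
	if (curr == DEPTH - 1)
		return -1;
	curr++;
	stack[curr].ret = ret;
	stack[curr].func = func;
	stack[curr].calltime = time;
	return 0;
}

/* Pop at function return; hands back the saved return address. */
static unsigned long shadow_pop(void)
{
	return stack[curr--].ret;
}

int main(void)
{
	/* Nested calls push 0x100 then 0x200; returns pop in LIFO order. */
	shadow_push(0x100, 0xa, 1);
	shadow_push(0x200, 0xb, 2);
	printf("%lx\n", shadow_pop()); /* 200 */
	printf("%lx\n", shadow_pop()); /* 100 */
	return 0;
}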
@@ -10,6 +10,7 @@
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
+#include <linux/ftrace.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
@@ -369,79 +369,6 @@ int ftrace_disable_ftrace_graph_caller(void)
 
 #endif /* !CONFIG_DYNAMIC_FTRACE */
 
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func, int *depth)
-{
-	int index;
-
-	if (!current->ret_stack)
-		return -EBUSY;
-
-	/* The return trace stack is full */
-	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-		atomic_inc(&current->trace_overrun);
-		return -EBUSY;
-	}
-
-	index = ++current->curr_ret_stack;
-	barrier();
-	current->ret_stack[index].ret = ret;
-	current->ret_stack[index].func = func;
-	current->ret_stack[index].calltime = time;
-	*depth = index;
-
-	return 0;
-}
-
-/* Retrieve a function return address from the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-	int index;
-
-	index = current->curr_ret_stack;
-
-	if (unlikely(index < 0)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic, otherwise we have nowhere to go */
-		*ret = (unsigned long)panic;
-		return;
-	}
-
-	*ret = current->ret_stack[index].ret;
-	trace->func = current->ret_stack[index].func;
-	trace->calltime = current->ret_stack[index].calltime;
-	trace->overrun = atomic_read(&current->trace_overrun);
-	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-	struct ftrace_graph_ret trace;
-	unsigned long ret;
-
-	pop_return_trace(&trace, &ret);
-	trace.rettime = cpu_clock(raw_smp_processor_id());
-	ftrace_graph_return(&trace);
-
-	if (unlikely(!ret)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic. What else to do? */
-		ret = (unsigned long)panic;
-	}
-
-	return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
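A detail worth pausing on in pop_return_trace above: when curr_ret_stack has gone negative the real return address is lost, so the code hands back the address of panic() — the subsequent "return" then lands in a function that fails loudly instead of jumping to garbage. The same trick in miniature (userspace sketch; panic_stub and recover_ret are hypothetical names):

#include <stdio.h>
#include <stdlib.h>

static void panic_stub(void)
{
	puts("panic: corrupted return stack");
	exit(1);
}

/* Returns the saved address, or panic_stub's address when state is bad. */
static unsigned long recover_ret(int index, unsigned long saved)
{
	if (index < 0)
		return (unsigned long)panic_stub; /* fail loudly, not wildly */
	return saved;
}

int main(void)
{
	/* Simulate a corrupted stack (index -1): we "return" into panic_stub. */
	void (*ret)(void) = (void (*)(void))recover_ret(-1, 0);
	ret();
	return 0;
}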
@@ -494,7 +421,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 
 	calltime = cpu_clock(raw_smp_processor_id());
 
-	if (push_return_trace(old, calltime,
+	if (ftrace_push_return_trace(old, calltime,
 				self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
 		return;
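For context, prepare_ftrace_return runs at function entry: by this point it has saved the original return address (old) out of the parent's stack slot and patched that slot to point at return_to_handler; if pushing onto the return stack fails with -EBUSY, the original address is restored so the function returns normally, untraced. A hedged sketch of that control flow, simulating the stack slot with a plain function pointer (hook_entry and the *_sim helpers are illustrations, not the kernel's code):

#include <errno.h>
#include <stdio.h>

static void return_to_handler_sim(void)
{
	puts("return intercepted: would call ftrace_return_to_handler()");
}

static void original_return_site(void)
{
	puts("normal return: tracing was skipped");
}

/* Simulated push; set 'full' to nonzero to exercise the -EBUSY path. */
static int full;
static int push_return_trace_sim(unsigned long old_ret)
{
	(void)old_ret;
	return full ? -EBUSY : 0;
}

/*
 * Simulated prepare_ftrace_return(): 'parent' stands in for the stack
 * slot holding the caller's return address.
 */
static void hook_entry(void (**parent)(void))
{
	void (*old)(void) = *parent;     /* save the original address */
	*parent = return_to_handler_sim; /* divert the return path    */
	if (push_return_trace_sim((unsigned long)old) == -EBUSY)
		*parent = old;           /* stack full: undo the hook */
}

int main(void)
{
	void (*slot)(void) = original_return_site;

	hook_entry(&slot);
	slot(); /* diverted through the trampoline */

	slot = original_return_site;
	full = 1; /* simulate an exhausted return stack */
	hook_entry(&slot);
	slot(); /* -EBUSY path: the original address was restored */
	return 0;
}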
@@ -403,6 +403,30 @@ struct ftrace_graph_ret {
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+/*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+	unsigned long ret;
+	unsigned long func;
+	unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relies on ftrace_return_to_handler.
+ * Defined in entry_32/64.S
+ */
+extern void return_to_handler(void);
+
+extern int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			 unsigned long func, int *depth);
+extern void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
+
 /*
  * Sometimes we don't want to trace a function with the function
  * graph tracer but we want it to keep being traced by the usual function
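The pair of externs added here is the interface an architecture's entry/exit hooks use: push at function entry with a timestamp, pop at return, and the graph tracer derives each function's duration from rettime - calltime. A rough sketch of that pairing under the same assumptions (userspace; now_ns() is a hypothetical stand-in for cpu_clock(raw_smp_processor_id())):

#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for the kernel's cpu_clock(). */
static unsigned long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	/* Entry hook: record a timestamp, as ftrace_push_return_trace does. */
	unsigned long long calltime = now_ns();

	/* ... traced function body runs here ... */

	/* Return hook: ftrace_return_to_handler fills in trace.rettime. */
	unsigned long long rettime = now_ns();

	printf("duration: %llu ns\n", rettime - calltime);
	return 0;
}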
@@ -50,6 +50,81 @@ static struct tracer_flags tracer_flags = {
 /* pid on the last trace processed */
 
 
+/* Add a function return address to the trace stack on thread info.*/
+int
+ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+			 unsigned long func, int *depth)
+{
+	int index;
+
+	if (!current->ret_stack)
+		return -EBUSY;
+
+	/* The return trace stack is full */
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
+		return -EBUSY;
+	}
+
+	index = ++current->curr_ret_stack;
+	barrier();
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
+	*depth = index;
+
+	return 0;
+}
+
+/* Retrieve a function return address from the trace stack on thread info.*/
+void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+	int index;
+
+	index = current->curr_ret_stack;
+
+	if (unlikely(index < 0)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic, otherwise we have nowhere to go */
+		*ret = (unsigned long)panic;
+		return;
+	}
+
+	*ret = current->ret_stack[index].ret;
+	trace->func = current->ret_stack[index].func;
+	trace->calltime = current->ret_stack[index].calltime;
+	trace->overrun = atomic_read(&current->trace_overrun);
+	trace->depth = index;
+	barrier();
+	current->curr_ret_stack--;
+
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+	struct ftrace_graph_ret trace;
+	unsigned long ret;
+
+	ftrace_pop_return_trace(&trace, &ret);
+	trace.rettime = cpu_clock(raw_smp_processor_id());
+	ftrace_graph_return(&trace);
+
+	if (unlikely(!ret)) {
+		ftrace_graph_stop();
+		WARN_ON(1);
+		/* Might as well panic. What else to do? */
+		ret = (unsigned long)panic;
+	}
+
+	return ret;
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
 	int ret = register_ftrace_graph(&trace_graph_return,
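One accounting detail in the code added above: once the stack is full, each further push increments trace_overrun instead of recording anything, and every pop copies the running count into trace->overrun so the report can say how many frames were dropped. A small sketch of that bookkeeping (userspace; DEPTH and push are again illustrative):

#include <stdio.h>

#define DEPTH 2
static int curr = -1;
static unsigned long overrun; /* models atomic_t trace_overrun */

/* Push that only tracks depth and overflow, like the code above. */
static int push(void)
{
	if (curr == DEPTH - 1) {
		overrun++; /* kernel: atomic_inc(&current->trace_overrun) */
		return -1; /* kernel: -EBUSY */
	}
	curr++;
	return 0;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		push(); /* 2 recorded, 3 overflowed */
	printf("depth=%d overrun=%lu\n", curr + 1, overrun); /* depth=2 overrun=3 */
	return 0;
}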