Merge branch 'tracing/ftrace' into auto-ftrace-next
commit 6712e299b7

include/linux/ftrace.h
@@ -89,6 +89,7 @@ void ftrace_enable_daemon(void);
 
 /* totally disable ftrace - can not re-enable after this */
 void ftrace_kill(void);
+void ftrace_kill_atomic(void);
 
 static inline void tracer_disable(void)
 {

kernel/Makefile
@@ -11,7 +11,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o
 
-CFLAGS_REMOVE_sched.o = -pg -mno-spe
+CFLAGS_REMOVE_sched.o = -mno-spe
 
 ifdef CONFIG_FTRACE
 # Do not trace debug files and internal ftrace files

kernel/trace/ftrace.c
@@ -1601,6 +1601,23 @@ core_initcall(ftrace_dynamic_init);
 # define ftrace_force_shutdown() do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+/**
+ * ftrace_kill_atomic - kill ftrace from critical sections
+ *
+ * This function should be used by panic code. It stops ftrace
+ * but in a not so nice way. If you need to simply kill ftrace
+ * from a non-atomic section, use ftrace_kill.
+ */
+void ftrace_kill_atomic(void)
+{
+	ftrace_disabled = 1;
+	ftrace_enabled = 0;
+#ifdef CONFIG_DYNAMIC_FTRACE
+	ftraced_suspend = -1;
+#endif
+	clear_ftrace_function();
+}
+
 /**
  * ftrace_kill - totally shutdown ftrace
  *
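
The new ftrace_kill_atomic() takes no locks and never sleeps: it only flips ftrace_disabled/ftrace_enabled, parks the ftraced daemon via ftraced_suspend, and clears the traced-function pointer, which is why the kerneldoc points panic code at it. A minimal sketch of the intended kind of call site (example_die_path is a hypothetical caller invented for this note, not part of the merge):

/* sketch: shutting down tracing from an oops/panic-style path */
static void example_die_path(void)
{
	/*
	 * We may hold locks and run in interrupt context here, so the
	 * heavier ftrace_kill() is off limits; the atomic variant only
	 * writes a few flags and clears the trace function.
	 */
	ftrace_kill_atomic();

	/* ... proceed with dumping state ... */
}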

kernel/trace/trace.c
@@ -96,6 +96,9 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
 
+/* function tracing enabled */
+int ftrace_function_enabled;
+
 /*
  * trace_nr_entries is the number of entries that is allocated
  * for a buffer. Note, the number of entries is always rounded
@@ -134,6 +137,7 @@ static notrace void no_trace_init(struct trace_array *tr)
 {
 	int cpu;
 
+	ftrace_function_enabled = 0;
 	if(tr->ctrl)
 		for_each_online_cpu(cpu)
 			tracing_reset(tr->data[cpu]);
@@ -1027,7 +1031,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	long disabled;
 	int cpu;
 
-	if (unlikely(!tracer_enabled))
+	if (unlikely(!ftrace_function_enabled))
 		return;
 
 	if (skip_trace(ip))
@@ -1052,11 +1056,15 @@ static struct ftrace_ops trace_ops __read_mostly =
 
 void tracing_start_function_trace(void)
 {
+	ftrace_function_enabled = 0;
 	register_ftrace_function(&trace_ops);
+	if (tracer_enabled)
+		ftrace_function_enabled = 1;
 }
 
 void tracing_stop_function_trace(void)
 {
+	ftrace_function_enabled = 0;
 	unregister_ftrace_function(&trace_ops);
 }
 #endif
@@ -1383,7 +1391,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 		   "server",
 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
 		   "desktop",
-#elif defined(CONFIG_PREEMPT_DESKTOP)
+#elif defined(CONFIG_PREEMPT)
 		   "preempt",
 #else
 		   "unknown",
@@ -1892,8 +1900,10 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 	m->private = iter;
 
 	/* stop the trace while dumping */
-	if (iter->tr->ctrl)
+	if (iter->tr->ctrl) {
 		tracer_enabled = 0;
+		ftrace_function_enabled = 0;
+	}
 
 	if (iter->trace && iter->trace->open)
 		iter->trace->open(iter);
@@ -1926,8 +1936,14 @@ int tracing_release(struct inode *inode, struct file *file)
 		iter->trace->close(iter);
 
 	/* reenable tracing if it was previously enabled */
-	if (iter->tr->ctrl)
+	if (iter->tr->ctrl) {
 		tracer_enabled = 1;
+		/*
+		 * It is safe to enable function tracing even if it
+		 * isn't used
+		 */
+		ftrace_function_enabled = 1;
+	}
 	mutex_unlock(&trace_types_lock);
 
 	seq_release(inode, file);
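
The trace.c hunks above all revolve around the new ftrace_function_enabled flag: function_trace_call() can fire for nearly every kernel function the moment trace_ops is registered, so the callback bails out unless the flag is set; tracing_start/stop_function_trace() clear it before (un)registering and set it only afterwards; and __tracing_open()/tracing_release() drop and restore it around a dump so the buffers hold still while being read. A userspace analogue of the gating (the *_demo names are invented for this sketch, not kernel symbols):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int fn_enabled;	/* stands in for ftrace_function_enabled */

static void function_trace_call_demo(const char *ip)
{
	if (!atomic_load(&fn_enabled))	/* same early bail as the kernel hunk */
		return;
	printf("traced: %s\n", ip);
}

static void start_function_trace_demo(void)
{
	atomic_store(&fn_enabled, 0);	/* keep the callback inert while hooking up */
	/* ... register function_trace_call_demo() with the tracing core ... */
	atomic_store(&fn_enabled, 1);	/* open the gate only once registered */
}

static void stop_function_trace_demo(void)
{
	atomic_store(&fn_enabled, 0);	/* close the gate before unhooking */
	/* ... unregister the callback ... */
}

int main(void)
{
	start_function_trace_demo();
	function_trace_call_demo("schedule");	/* recorded */
	stop_function_trace_demo();
	function_trace_call_demo("schedule");	/* ignored: flag already cleared */
	return 0;
}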

kernel/trace/trace.h
@@ -223,8 +223,6 @@ void trace_function(struct trace_array *tr,
 		    unsigned long parent_ip,
 		    unsigned long flags);
 
-void tracing_start_function_trace(void);
-void tracing_stop_function_trace(void);
 void tracing_start_cmdline_record(void);
 void tracing_stop_cmdline_record(void);
 int register_tracer(struct tracer *type);
@@ -241,6 +239,14 @@ void update_max_tr_single(struct trace_array *tr,
 
 extern cycle_t ftrace_now(int cpu);
 
+#ifdef CONFIG_FTRACE
+void tracing_start_function_trace(void);
+void tracing_stop_function_trace(void);
+#else
+# define tracing_start_function_trace() do { } while (0)
+# define tracing_stop_function_trace() do { } while (0)
+#endif
+
 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
 typedef void
 (*tracer_switch_func_t)(void *private,
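
The #else branch uses the classic do { } while (0) stub so that a compiled-out call still expands to exactly one statement and stays safe in un-braced if/else chains. A small standalone illustration of the same pattern (CONFIG_FTRACE_DEMO and the _demo name are invented for this sketch):

#include <stdio.h>

/* Stub pattern from trace.h: compile the call away when the feature
 * is configured out, while still parsing as a single statement. */
#ifdef CONFIG_FTRACE_DEMO
void tracing_start_function_trace_demo(void) { puts("start"); }
#else
# define tracing_start_function_trace_demo() do { } while (0)
#endif

int main(void)
{
	int enabled = 1;

	if (enabled)
		tracing_start_function_trace_demo();	/* a no-op here, yet still one statement */
	else
		puts("tracing disabled");

	return 0;
}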

kernel/trace/trace_functions.c
@@ -28,7 +28,10 @@ static void function_reset(struct trace_array *tr)
 
 static void start_function_trace(struct trace_array *tr)
{
+	tr->cpu = get_cpu();
 	function_reset(tr);
+	put_cpu();
+
 	tracing_start_cmdline_record();
 	tracing_start_function_trace();
 }
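
In the hunk above, get_cpu() both returns the current CPU number for tr->cpu and disables preemption until put_cpu(), so the task cannot migrate to another CPU while the buffers are reset. A kernel-style sketch of the bracket, with comments added for this note (illustrative only, mirroring the lines just added):

	tr->cpu = get_cpu();	/* disable preemption; record which CPU we hold */
	function_reset(tr);	/* cannot migrate off this CPU in here */
	put_cpu();		/* re-enable preemption */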

kernel/trace/trace_sched_switch.c
@@ -227,14 +227,14 @@ void tracing_stop_cmdline_record(void)
 static void start_sched_trace(struct trace_array *tr)
 {
 	sched_switch_reset(tr);
-	tracer_enabled = 1;
 	tracing_start_cmdline_record();
+	tracer_enabled = 1;
 }
 
 static void stop_sched_trace(struct trace_array *tr)
 {
+	tracing_stop_cmdline_record();
 	tracer_enabled = 0;
-	tracing_stop_cmdline_record();
 }
 
 static void sched_switch_trace_init(struct trace_array *tr)

kernel/trace/trace_sched_wakeup.c
@@ -352,9 +352,10 @@ static void start_wakeup_tracer(struct trace_array *tr)
 	 */
 	smp_wmb();
 
-	tracer_enabled = 1;
 	register_ftrace_function(&trace_ops);
 
+	tracer_enabled = 1;
+
 	return;
fail_deprobe_wake_new:
 	marker_probe_unregister("kernel_sched_wakeup_new",
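
The two scheduler-tracer hunks share one discipline on the start path: tracer_enabled is flipped on only after the machinery it gates is fully registered (the cmdline-record markers in sched_switch, the function tracer in wakeup), with smp_wmb() ordering the earlier setup before the flag. A userspace analogue using C11 atomics (demo names invented; a release/acquire pair plays the role of smp_wmb() plus the plain flag assignment):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int tracer_enabled_demo;
static const char *probe_target;	/* state the probe relies on */

static void start_tracer_demo(void)
{
	probe_target = "wakeup";	/* finish all setup first ... */
	/* ... then publish: everything above is visible before the flag */
	atomic_store_explicit(&tracer_enabled_demo, 1, memory_order_release);
}

static void probe_demo(void)
{
	if (!atomic_load_explicit(&tracer_enabled_demo, memory_order_acquire))
		return;			/* flag set implies setup is visible too */
	printf("probe fired for %s\n", probe_target);
}

int main(void)
{
	probe_demo();		/* too early: silently ignored */
	start_tracer_demo();
	probe_demo();		/* now records */
	return 0;
}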