Merge branch 'tip/tracing/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace

commit 327019b01e

Changed paths: include/linux, kernel/trace

diff --git a/include/linux/compiler.h b/include/linux/compiler.h
@@ -68,6 +68,7 @@ struct ftrace_branch_data {
 			unsigned long miss;
 			unsigned long hit;
 		};
+		unsigned long miss_hit[2];
 	};
 };
 
@@ -125,10 +126,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 				.line = __LINE__,		\
 			};					\
 		______r = !!(cond);				\
-		if (______r)					\
-			______f.hit++;				\
-		else						\
-			______f.miss++;				\
+		______f.miss_hit[______r]++;			\
 		______r;					\
 	}))
 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
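The macro rewrite above is the substantive change in this file: the branch profiler previously branched on the very condition it was measuring (an if/else around hit++/miss++), and now does a single unconditional, indexed increment instead. A minimal userspace sketch of the same pattern (names are hypothetical stand-ins, not the kernel's):

	#include <stdio.h>

	/* Stand-in for the counter union in struct ftrace_branch_data. */
	struct branch_counters {
		unsigned long miss_hit[2];	/* [0] = miss, [1] = hit */
	};

	/* !!(cond) normalizes any truthy value to 0 or 1, which indexes
	 * straight into the array; no data-dependent branch is needed. */
	static inline void record_branch(struct branch_counters *c, int cond)
	{
		c->miss_hit[!!cond]++;
	}

	int main(void)
	{
		struct branch_counters c = { { 0, 0 } };

		for (int i = 0; i < 100; i++)
			record_branch(&c, i % 3 == 0);

		printf("miss=%lu hit=%lu\n", c.miss_hit[0], c.miss_hit[1]);
		return 0;
	}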
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
@@ -118,8 +118,11 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 
-u64 ring_buffer_time_stamp(int cpu);
-void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
+u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
+void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+				      int cpu, u64 *ts);
+void ring_buffer_set_clock(struct ring_buffer *buffer,
+			   u64 (*clock)(void));
 
 size_t ring_buffer_page_len(void *page);
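The prototype changes follow from the central design move of this merge: the time stamp source becomes a per-buffer property, so every timestamp call must carry the buffer, and a setter lets tracers swap the clock at runtime. A compilable userspace sketch of that pluggable-clock shape (it models only the pluggability, not the kernel's actual clock semantics; all names are illustrative):

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	typedef uint64_t u64;

	static u64 clock_local(void)	/* models trace_clock_local() */
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (u64)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	}

	static u64 clock_global(void)	/* models trace_clock_global() */
	{
		struct timespec ts;
		clock_gettime(CLOCK_REALTIME, &ts);
		return (u64)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	}

	/* The buffer owns its clock; every timestamp goes through it. */
	struct ring_buffer {
		u64 (*clock)(void);
	};

	static void ring_buffer_set_clock(struct ring_buffer *b,
					  u64 (*clock)(void))
	{
		b->clock = clock;
	}

	static u64 ring_buffer_time_stamp(struct ring_buffer *b)
	{
		return b->clock();
	}

	int main(void)
	{
		struct ring_buffer rb = { .clock = clock_local };  /* default */

		printf("local:  %llu\n",
		       (unsigned long long)ring_buffer_time_stamp(&rb));
		ring_buffer_set_clock(&rb, clock_global);  /* runtime swap */
		printf("global: %llu\n",
		       (unsigned long long)ring_buffer_time_stamp(&rb));
		return 0;
	}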
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
@@ -180,29 +180,6 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
 
 #include "trace.h"
 
-/* Up this if you want to test the TIME_EXTENTS and normalization */
-#define DEBUG_SHIFT 0
-
-u64 ring_buffer_time_stamp(int cpu)
-{
-	u64 time;
-
-	preempt_disable_notrace();
-	/* shift to debug/test normalization and TIME_EXTENTS */
-	time = trace_clock_local() << DEBUG_SHIFT;
-	preempt_enable_no_resched_notrace();
-
-	return time;
-}
-EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
-
-void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
-{
-	/* Just stupid testing the normalize function and deltas */
-	*ts >>= DEBUG_SHIFT;
-}
-EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
-
 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT		4U
 #define RB_MAX_SMALL_DATA	28
@@ -374,6 +351,7 @@ struct ring_buffer {
 #ifdef CONFIG_HOTPLUG_CPU
 	struct notifier_block		cpu_notify;
 #endif
+	u64				(*clock)(void);
 };
 
 struct ring_buffer_iter {
@@ -394,6 +372,30 @@ struct ring_buffer_iter {
 		_____ret;			\
 	})
 
+/* Up this if you want to test the TIME_EXTENTS and normalization */
+#define DEBUG_SHIFT 0
+
+u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
+{
+	u64 time;
+
+	preempt_disable_notrace();
+	/* shift to debug/test normalization and TIME_EXTENTS */
+	time = buffer->clock() << DEBUG_SHIFT;
+	preempt_enable_no_resched_notrace();
+
+	return time;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
+
+void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+				      int cpu, u64 *ts)
+{
+	/* Just stupid testing the normalize function and deltas */
+	*ts >>= DEBUG_SHIFT;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
+
 /**
  * check_pages - integrity check of buffer pages
  * @cpu_buffer: CPU buffer with pages to test
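DEBUG_SHIFT, moved here along with the two functions, is a test knob: raising it above zero inflates raw timestamps so the TIME_EXTENTS and delta handling get exercised, while normalization shifts the value back down and readers see unchanged time. A small userspace sketch of that round trip (hypothetical model, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t u64;

	#define DEBUG_SHIFT 3	/* 0 in the kernel unless you are testing */

	static u64 raw_stamp(u64 clock_ns)
	{
		return clock_ns << DEBUG_SHIFT;	/* what the buffer records */
	}

	static void normalize_stamp(u64 *ts)
	{
		*ts >>= DEBUG_SHIFT;		/* what readers see */
	}

	int main(void)
	{
		u64 ts = raw_stamp(1000);
		printf("raw=%llu\n", (unsigned long long)ts);	     /* 8000 */
		normalize_stamp(&ts);
		printf("normalized=%llu\n", (unsigned long long)ts); /* 1000 */
		return 0;
	}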
@@ -569,6 +571,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 
 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 	buffer->flags = flags;
+	buffer->clock = trace_clock_local;
 
 	/* need at least two pages */
 	if (buffer->pages == 1)
@@ -645,6 +648,12 @@ ring_buffer_free(struct ring_buffer *buffer)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free);
 
+void ring_buffer_set_clock(struct ring_buffer *buffer,
+			   u64 (*clock)(void))
+{
+	buffer->clock = clock;
+}
+
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 
 static void
@@ -1191,7 +1200,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		cpu_buffer->tail_page = next_page;
 
 		/* reread the time stamp */
-		*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
+		*ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
 		cpu_buffer->tail_page->page->time_stamp = *ts;
 	}
 
@@ -1334,7 +1343,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
 		return NULL;
 
-	ts = ring_buffer_time_stamp(cpu_buffer->cpu);
+	ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
 
 	/*
 	 * Only the first commit can update the timestamp.
@@ -2051,7 +2060,8 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	case RINGBUF_TYPE_DATA:
 		if (ts) {
 			*ts = cpu_buffer->read_stamp + event->time_delta;
-			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
+			ring_buffer_normalize_time_stamp(buffer,
+							 cpu_buffer->cpu, ts);
 		}
 		return event;
 
@@ -2112,7 +2122,8 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	case RINGBUF_TYPE_DATA:
 		if (ts) {
 			*ts = iter->read_stamp + event->time_delta;
-			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
+			ring_buffer_normalize_time_stamp(buffer,
+							 cpu_buffer->cpu, ts);
 		}
 		return event;
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
@@ -155,13 +155,6 @@ ns2usecs(cycle_t nsec)
 	return nsec;
 }
 
-cycle_t ftrace_now(int cpu)
-{
-	u64 ts = ring_buffer_time_stamp(cpu);
-	ring_buffer_normalize_time_stamp(cpu, &ts);
-	return ts;
-}
-
 /*
  * The global_trace is the descriptor that holds the tracing
  * buffers for the live tracing. For each CPU, it contains
@@ -178,6 +171,20 @@ static struct trace_array	global_trace;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
+cycle_t ftrace_now(int cpu)
+{
+	u64 ts;
+
+	/* Early boot up does not have a buffer yet */
+	if (!global_trace.buffer)
+		return trace_clock_local();
+
+	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
+	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
+
+	return ts;
+}
+
 /*
  * The max_tr is used to snapshot the global_trace when a maximum
  * latency is reached. Some tracers will use this to store a maximum
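The relocated ftrace_now() now guards against the early-boot window in which global_trace.buffer has not been allocated yet: instead of dereferencing a NULL buffer it falls back to the local trace clock. A toy model of that fallback (names hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t u64;

	static u64 fallback_clock(void) { return 1; } /* models trace_clock_local() */
	static u64 buffer_clock(void)   { return 2; } /* models buffer->clock() */

	struct ring_buffer { u64 (*clock)(void); };

	/* NULL until "boot" finishes, like global_trace.buffer early on. */
	static struct ring_buffer *global_buffer;

	static u64 now(void)
	{
		/* Early boot up does not have a buffer yet */
		if (!global_buffer)
			return fallback_clock();
		return global_buffer->clock();
	}

	int main(void)
	{
		printf("%llu\n", (unsigned long long)now());	/* 1: fallback */

		struct ring_buffer rb = { .clock = buffer_clock };
		global_buffer = &rb;
		printf("%llu\n", (unsigned long long)now());	/* 2: buffer clock */
		return 0;
	}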
@@ -308,6 +315,7 @@ static const char *trace_options[] = {
 	"printk-msg-only",
 	"context-info",
 	"latency-format",
+	"global-clock",
 	NULL
 };
 
@@ -2244,6 +2252,34 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 	return 0;
 }
 
+static void set_tracer_flags(unsigned int mask, int enabled)
+{
+	/* do nothing if flag is already set */
+	if (!!(trace_flags & mask) == !!enabled)
+		return;
+
+	if (enabled)
+		trace_flags |= mask;
+	else
+		trace_flags &= ~mask;
+
+	if (mask == TRACE_ITER_GLOBAL_CLK) {
+		u64 (*func)(void);
+
+		if (enabled)
+			func = trace_clock_global;
+		else
+			func = trace_clock_local;
+
+		mutex_lock(&trace_types_lock);
+		ring_buffer_set_clock(global_trace.buffer, func);
+
+		if (max_tr.buffer)
+			ring_buffer_set_clock(max_tr.buffer, func);
+		mutex_unlock(&trace_types_lock);
+	}
+}
+
 static ssize_t
 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
@@ -2271,10 +2307,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		int len = strlen(trace_options[i]);
 
 		if (strncmp(cmp, trace_options[i], len) == 0) {
-			if (neg)
-				trace_flags &= ~(1 << i);
-			else
-				trace_flags |= (1 << i);
+			set_tracer_flags(1 << i, !neg);
 			break;
 		}
 	}
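tracing_trace_options_write() now funnels through set_tracer_flags(), which is idempotent: it returns early when the flag already has the requested value, so the clock-switching side effect fires only on a real transition. Assuming the usual debugfs layout, the new option would be toggled by writing "global-clock" or "noglobal-clock" to the tracing trace_options file. A compilable sketch of the early-out pattern (the flag value is borrowed from the diff, everything else is illustrative):

	#include <stdio.h>

	static unsigned int flags;

	#define FLAG_GLOBAL_CLK 0x80000

	/* Models set_tracer_flags(): bail out when nothing changes so the
	 * side effect (switching the clock) runs only on transitions. */
	static void set_flag(unsigned int mask, int enabled)
	{
		if (!!(flags & mask) == !!enabled)
			return;	/* already in the requested state */

		if (enabled)
			flags |= mask;
		else
			flags &= ~mask;

		if (mask == FLAG_GLOBAL_CLK)
			printf("clock switched to %s\n",
			       enabled ? "global" : "local");
	}

	int main(void)
	{
		set_flag(FLAG_GLOBAL_CLK, 1);	/* prints: switched to global */
		set_flag(FLAG_GLOBAL_CLK, 1);	/* no-op: state unchanged */
		set_flag(FLAG_GLOBAL_CLK, 0);	/* prints: switched to local */
		return 0;
	}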
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
@@ -684,6 +684,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_PRINTK_MSGONLY	= 0x10000,
 	TRACE_ITER_CONTEXT_INFO		= 0x20000, /* Print pid/cpu/time */
 	TRACE_ITER_LATENCY_FMT		= 0x40000,
+	TRACE_ITER_GLOBAL_CLK		= 0x80000,
 };
 
 /*
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
@@ -122,10 +122,14 @@ fail_start:
 static void start_power_trace(struct trace_array *tr)
 {
 	trace_power_enabled = 1;
-	tracing_power_register();
 }
 
 static void stop_power_trace(struct trace_array *tr)
+{
+	trace_power_enabled = 0;
+}
+
+static void power_trace_reset(struct trace_array *tr)
 {
 	trace_power_enabled = 0;
 	unregister_trace_power_start(probe_power_start);
@@ -188,7 +192,7 @@ static struct tracer power_tracer __read_mostly =
 	.init		= power_trace_init,
 	.start		= start_power_trace,
 	.stop		= stop_power_trace,
-	.reset		= stop_power_trace,
+	.reset		= power_trace_reset,
 	.print_line	= power_print_line,
 };
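The power tracer fix separates two lifecycle stages that were conflated: .stop is a temporary pause and should only clear the enable flag, while .reset tears the tracer down and must also unregister the tracepoint probes (previously .reset pointed at stop_power_trace, which left the probes registered). A toy model of the distinction (hypothetical names):

	#include <stdio.h>

	static int enabled;
	static int registered = 1;	/* probe registered at init */

	/* stop() only gates tracing; reset() also tears down. */
	static void stop_trace(void)  { enabled = 0; }
	static void reset_trace(void) { enabled = 0; registered = 0; }

	int main(void)
	{
		stop_trace();	/* temporary pause: probe stays registered */
		printf("after stop:  enabled=%d registered=%d\n",
		       enabled, registered);
		reset_trace();	/* teardown: probe unregistered too */
		printf("after reset: enabled=%d registered=%d\n",
		       enabled, registered);
		return 0;
	}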
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
@@ -18,6 +18,7 @@ static struct trace_array	*ctx_trace;
 static int __read_mostly	tracer_enabled;
 static int			sched_ref;
 static DEFINE_MUTEX(sched_register_mutex);
+static int			sched_stopped;
 
 static void
 probe_sched_switch(struct rq *__rq, struct task_struct *prev,
@@ -28,7 +29,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	int cpu;
 	int pc;
 
-	if (!sched_ref)
+	if (!sched_ref || sched_stopped)
 		return;
 
 	tracing_record_cmdline(prev);
@@ -193,6 +194,7 @@ static void stop_sched_trace(struct trace_array *tr)
 static int sched_switch_trace_init(struct trace_array *tr)
 {
 	ctx_trace = tr;
+	tracing_reset_online_cpus(tr);
 	tracing_start_sched_switch_record();
 	return 0;
 }
@@ -205,13 +207,12 @@ static void sched_switch_trace_reset(struct trace_array *tr)
 
 static void sched_switch_trace_start(struct trace_array *tr)
 {
-	tracing_reset_online_cpus(tr);
-	tracing_start_sched_switch();
+	sched_stopped = 0;
 }
 
 static void sched_switch_trace_stop(struct trace_array *tr)
 {
-	tracing_stop_sched_switch();
+	sched_stopped = 1;
 }
 
 static struct tracer sched_switch_trace __read_mostly =
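The sched_switch changes replace register/unregister churn with a soft gate: init() resets the buffers once and starts recording, while start()/stop() merely flip sched_stopped, which the probe checks first thing. A compilable sketch of the gating idea (hypothetical userspace model):

	#include <stdio.h>

	/* The probe stays registered; it just returns immediately while
	 * tracing is soft-stopped. */
	static int sched_ref = 1;	/* probe registered */
	static int sched_stopped;

	static void probe_sched_switch(int prev, int next)
	{
		if (!sched_ref || sched_stopped)
			return;		/* cheap early-out, no re-registration */
		printf("switch %d -> %d\n", prev, next);
	}

	int main(void)
	{
		probe_sched_switch(1, 2);	/* traced */
		sched_stopped = 1;		/* stop(): just flip the flag */
		probe_sched_switch(2, 3);	/* ignored */
		sched_stopped = 0;		/* start(): flip it back */
		probe_sched_switch(3, 4);	/* traced again */
		return 0;
	}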