ftrace: fix updates to max trace
This patch fixes some bugs in the updating of the max trace that were caused by the implementation of the new buffering.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 89b2f97819
parent 18cef379d3
@@ -153,6 +153,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		memcpy(max_tr.data[i], data, sizeof(*data));
 		data->trace = save_trace;
 		data->trace_pages = save_pages;
+		tracing_reset(data);
 	}
 
 	__update_max_tr(tr, tsk, cpu);
@@ -183,6 +184,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	memcpy(max_tr.data[cpu], data, sizeof(*data));
 	data->trace = save_trace;
 	data->trace_pages = save_pages;
+	tracing_reset(data);
 
 	__update_max_tr(tr, tsk, cpu);
 	spin_unlock(&ftrace_max_lock);
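Both hunks above add a tracing_reset(data) after the per-CPU data has been copied into max_tr and the saved buffer pointers handed back, so the buffer the live trace array keeps writing to starts from a clean state instead of replaying stale entries. A minimal standalone sketch of that swap-and-reset idea (the struct and helper below are illustrative, not the kernel's types):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for a per-CPU trace buffer. */
struct cpu_buf {
	int	entries[8];
	int	idx;		/* next write position */
};

/*
 * Snapshot the live buffer into the "max" slot, then reset the live
 * buffer so new events do not mix with the ones already snapshotted.
 */
static void save_max_and_reset(struct cpu_buf *max, struct cpu_buf *live)
{
	memcpy(max, live, sizeof(*max));	/* like memcpy(max_tr.data[cpu], data, ...) */
	live->idx = 0;				/* like the added tracing_reset(data) */
}

int main(void)
{
	struct cpu_buf live = { { 1, 2, 3 }, 3 }, max = { { 0 }, 0 };

	save_max_and_reset(&max, &live);
	printf("max holds %d entries, live buffer restarts at %d\n", max.idx, live.idx);
	return 0;
}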
@@ -877,6 +879,8 @@ print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
 			   entry->ctx.next_prio,
 			   comm);
 		break;
+	default:
+		seq_printf(m, "Unknown type %d\n", entry->type);
 	}
 }
 
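The new default: arm keeps print_lat_fmt() from silently skipping an entry type it does not know how to render. A generic sketch of the same pattern (the entry types and printing helper here are made up for illustration):

#include <stdio.h>

enum entry_type { ENTRY_FN, ENTRY_CTX };

/* Print one entry; unknown types are reported instead of being ignored. */
static void print_entry(int type)
{
	switch (type) {
	case ENTRY_FN:
		puts("function entry");
		break;
	case ENTRY_CTX:
		puts("context-switch entry");
		break;
	default:
		printf("Unknown type %d\n", type);
	}
}

int main(void)
{
	print_entry(ENTRY_CTX);
	print_entry(42);	/* exercises the default arm */
	return 0;
}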
@@ -1625,7 +1629,6 @@ __init static int tracer_alloc_buffers(void)
 	 * round up a bit.
 	 */
 	global_trace.entries = ENTRIES_PER_PAGE;
-	max_tr.entries = global_trace.entries;
 	pages++;
 
 	while (global_trace.entries < trace_nr_entries) {
@@ -1633,6 +1636,7 @@ __init static int tracer_alloc_buffers(void)
 			break;
 		pages++;
 	}
+	max_tr.entries = global_trace.entries;
 
 	pr_info("tracer: %d pages allocated for %ld",
 		pages, trace_nr_entries);
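The two tracer_alloc_buffers() hunks move max_tr.entries = global_trace.entries; from before the page-allocation loop to after it. Before the move, max_tr.entries was captured while global_trace.entries still held only the first page's worth of entries, so it went stale as the loop grew the count; copying it once the loop has finished keeps the max buffer's entry count in step. A small sketch of that ordering bug (plain C, illustrative sizes, not the kernel allocation code):

#include <stdio.h>

#define ENTRIES_PER_PAGE 32

int main(void)
{
	long requested = 100;
	long global_entries = ENTRIES_PER_PAGE;
	long stale_max, fixed_max;

	stale_max = global_entries;		/* old placement: copied too early */

	while (global_entries < requested)	/* allocation loop grows the count */
		global_entries += ENTRIES_PER_PAGE;

	fixed_max = global_entries;		/* new placement: copied after the loop */

	printf("global=%ld stale max=%ld fixed max=%ld\n",
	       global_entries, stale_max, fixed_max);
	return 0;
}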
The hunks above are against the core tracer (presumably kernel/trace/trace.c); the remaining hunks touch the irqsoff latency tracer (presumably kernel/trace/trace_irqsoff.c):
@@ -23,6 +23,8 @@ static int tracer_enabled __read_mostly;
 
 static DEFINE_PER_CPU(int, tracing_cpu);
 
+static DEFINE_SPINLOCK(max_trace_lock);
+
 enum {
 	TRACER_IRQS_OFF = (1 << 1),
 	TRACER_PREEMPT_OFF = (1 << 2),
@@ -126,7 +128,7 @@ check_critical_timing(struct trace_array *tr,
 		      int cpu)
 {
 	unsigned long latency, t0, t1;
-	cycle_t T0, T1, T2, delta;
+	cycle_t T0, T1, delta;
 	unsigned long flags;
 
 	/*
@@ -142,20 +144,18 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out;
 
-	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
-	/*
-	 * Update the timestamp, because the trace entry above
-	 * might change it (it can only get larger so the latency
-	 * is fair to be reported):
-	 */
-	T2 = now(cpu);
+	spin_lock(&max_trace_lock);
 
-	delta = T2-T0;
+	/* check if we are still the max latency */
+	if (!report_latency(delta))
+		goto out_unlock;
+
+	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
 
 	latency = nsecs_to_usecs(delta);
 
 	if (data->critical_sequence != max_sequence)
-		goto out;
+		goto out_unlock;
 
 	tracing_max_latency = delta;
 	t0 = nsecs_to_usecs(T0);
@@ -189,6 +189,9 @@ check_critical_timing(struct trace_array *tr,
 
 	max_sequence++;
 
+out_unlock:
+	spin_unlock(&max_trace_lock);
+
 out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = now(cpu);
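The check_critical_timing() hunks are the core of the fix: the latency is re-checked under the new max_trace_lock before tracing_max_latency and the max trace are updated, and the T2 = now(cpu) re-read goes away, so concurrent CPUs finishing a critical section cannot both rewrite the max snapshot at once. A condensed, standalone sketch of that check/lock/re-check shape (a pthread mutex stands in for the kernel spinlock, and the names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t max_trace_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long tracing_max_latency;	/* current record */

static int report_latency(unsigned long long delta)
{
	return delta > tracing_max_latency;
}

/* Called when a critical section ends after `delta` time units. */
static void check_critical_timing(unsigned long long delta)
{
	if (!report_latency(delta))	/* cheap unlocked check first */
		return;

	pthread_mutex_lock(&max_trace_lock);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	tracing_max_latency = delta;	/* record the new max (and snapshot it) here */
	printf("new max latency: %llu\n", delta);

out_unlock:
	pthread_mutex_unlock(&max_trace_lock);
}

int main(void)
{
	check_critical_timing(10);
	check_critical_timing(5);	/* fails the unlocked check, never takes the lock */
	return 0;
}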
@@ -366,14 +369,14 @@ void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
 
 static void start_irqsoff_tracer(struct trace_array *tr)
 {
-	tracer_enabled = 1;
 	register_ftrace_function(&trace_ops);
+	tracer_enabled = 1;
 }
 
 static void stop_irqsoff_tracer(struct trace_array *tr)
 {
-	unregister_ftrace_function(&trace_ops);
 	tracer_enabled = 0;
+	unregister_ftrace_function(&trace_ops);
 }
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
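The last hunk reorders tracer start/stop: tracer_enabled is only set after register_ftrace_function() has installed the callback, and is cleared before unregister_ftrace_function() tears it down, so the flag never claims the tracer is live while the callback machinery is not in place. A minimal sketch of that ordering (the register/unregister helpers are stand-ins, not the ftrace API):

#include <stdio.h>

static volatile int tracer_enabled;	/* gates the tracing hooks */

/* Stand-ins for register/unregister_ftrace_function(). */
static void register_callback(void)   { puts("callback registered"); }
static void unregister_callback(void) { puts("callback unregistered"); }

static void start_tracer(void)
{
	register_callback();	/* install the machinery first ... */
	tracer_enabled = 1;	/* ... then let the hooks use it */
}

static void stop_tracer(void)
{
	tracer_enabled = 0;	/* stop the hooks first ... */
	unregister_callback();	/* ... then it is safe to tear down */
}

int main(void)
{
	start_tracer();
	stop_tracer();
	return 0;
}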