Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing: limit the number of loops the ring buffer self test can make
  tracing: have function trace select kallsyms
  tracing: disable tracing while testing ring buffer
  tracing/function-graph-tracer: trace the idle tasks
commit f54b2fe4ae
kernel/trace/Kconfig
@@ -52,6 +52,7 @@ config FUNCTION_TRACER
 	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
 	select FRAME_POINTER
+	select KALLSYMS
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
 	help
@@ -238,6 +239,7 @@ config STACK_TRACER
 	depends on DEBUG_KERNEL
 	select FUNCTION_TRACER
 	select STACKTRACE
+	select KALLSYMS
 	help
 	  This special tracer records the maximum stack footprint of the
 	  kernel and displays it in debugfs/tracing/stack_trace.
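The two new select KALLSYMS lines guarantee symbol resolution is built in whenever these tracers are enabled; without kallsyms, trace output degrades to raw hex addresses. A minimal sketch of what that buys (illustration only, not part of the patch; show_traced_ip is a made-up helper, and %pS is the printk format that resolves an address through kallsyms):

#include <linux/kernel.h>

static void show_traced_ip(unsigned long ip)
{
	/* with CONFIG_KALLSYMS=y this prints "symbol_name+0xoff/0xsize";
	 * without it, only the raw address can be shown */
	printk(KERN_INFO "traced ip: %pS\n", (void *)ip);
}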
kernel/trace/ftrace.c
@@ -2033,7 +2033,7 @@ free:
 static int start_graph_tracing(void)
 {
 	struct ftrace_ret_stack **ret_stack_list;
-	int ret;
+	int ret, cpu;

 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
 				sizeof(struct ftrace_ret_stack *),
@@ -2042,6 +2042,10 @@ static int start_graph_tracing(void)
 	if (!ret_stack_list)
 		return -ENOMEM;

+	/* The cpu_boot init_task->ret_stack will never be freed */
+	for_each_online_cpu(cpu)
+		ftrace_graph_init_task(idle_task(cpu));
+
 	do {
 		ret = alloc_retstack_tasklist(ret_stack_list);
 	} while (ret == -EAGAIN);
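The new pass matters because alloc_retstack_tasklist() walks the regular task list, which the per-cpu idle (swapper) tasks are not on, so the idle tasks must be handed a ret_stack explicitly before graph tracing starts. A toy userspace model of that gap (illustration only; all names here are invented):

#include <stdio.h>
#include <stddef.h>

#define NR_CPUS 2

struct task { const char *comm; int has_ret_stack; };

static struct task tasklist[] = { { "bash", 0 }, { "cc1", 0 } };
static struct task idle_tasks[NR_CPUS] = { { "swapper/0", 0 }, { "swapper/1", 0 } };

static void graph_init_task(struct task *t) { t->has_ret_stack = 1; }

int main(void)
{
	size_t i;
	int cpu;

	/* the explicit idle-task pass the patch adds */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		graph_init_task(&idle_tasks[cpu]);

	/* stands in for alloc_retstack_tasklist(): it walks only tasklist[],
	 * so without the pass above swapper/N would stay uninitialized */
	for (i = 0; i < sizeof(tasklist) / sizeof(tasklist[0]); i++)
		graph_init_task(&tasklist[i]);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("%s: ret_stack %s\n", idle_tasks[cpu].comm,
		       idle_tasks[cpu].has_ret_stack ? "present" : "missing");
	return 0;
}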
kernel/trace/trace_selftest.c
@@ -23,10 +23,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer_event *event;
 	struct trace_entry *entry;
+	unsigned int loops = 0;

 	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
 		entry = ring_buffer_event_data(event);

+		/*
+		 * The ring buffer is a size of trace_buf_size, if
+		 * we loop more than the size, there's something wrong
+		 * with the ring buffer.
+		 */
+		if (loops++ > trace_buf_size) {
+			printk(KERN_CONT ".. bad ring buffer ");
+			goto failed;
+		}
 		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
 				entry->type);
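The bound is sound because a buffer of trace_buf_size entries can never yield more than that many events to a single drain while the producer is quiet; exceeding it means something is refilling or corrupting the buffer, so the test fails instead of spinning forever. The same bounded-drain pattern as a standalone sketch (illustration only, invented names):

#include <stdio.h>

#define BUF_CAPACITY 8	/* stands in for trace_buf_size */

/* models a broken tracer that keeps the buffer non-empty */
static int buffer_consume(void) { return 1; }

int main(void)
{
	unsigned int loops = 0;

	while (buffer_consume()) {
		/* more iterations than the buffer can hold: give up */
		if (loops++ > BUF_CAPACITY) {
			printf(".. bad ring buffer\n");
			return 1;	/* bail out, like goto failed above */
		}
	}
	return 0;
}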
@@ -57,11 +67,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)

 	cnt = ring_buffer_entries(tr->buffer);

+	/*
+	 * The trace_test_buffer_cpu runs a while loop to consume all data.
+	 * If the calling tracer is broken, and is constantly filling
+	 * the buffer, this will run forever, and hard lock the box.
+	 * We disable the ring buffer while we do this test to prevent
+	 * a hard lock up.
+	 */
+	tracing_off();
 	for_each_possible_cpu(cpu) {
 		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
+	tracing_on();
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_restore(flags);
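Bracketing the drain with tracing_off()/tracing_on() makes the loops in trace_test_buffer_cpu() terminate even against a broken tracer: with the producer quiesced, the buffer can only shrink. A standalone sketch of the same idea (illustration only, invented names):

#include <stdio.h>

static int recording = 1;	/* stands in for the ring buffer on/off state */
static int pending = 5;		/* events sitting in the buffer */

static void produce(void) { if (recording) pending++; }
static int consume(void) { return pending > 0 ? pending-- : 0; }

int main(void)
{
	recording = 0;		/* tracing_off(): the producer goes quiet */
	while (consume())
		produce();	/* a broken tracer trying to refill: no-op now */
	recording = 1;		/* tracing_on(): resume normal recording */

	printf("buffer drained, %d events pending\n", pending);
	return 0;
}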