Merge tag 'trace-v4.13-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
 "Various bug fixes:

   - Two small memory leaks in error paths.

   - A missed return error code on an error path.

   - A fix to check the tracing ring buffer CPU when it doesn't exist
     (caused by setting maxcpus on the command line that is less than
     the actual number of CPUs, and then onlining them manually).

   - A fix to have the reset of boot tracers called by lateinit_sync()
     instead of just lateinit(). As some of the tracers register via
     lateinit(), and if the clear happens before the tracer is
     registered, it will never start even though it was told to via the
     kernel command line"

* tag 'trace-v4.13-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Fix freeing of filter in create_filter() when set_str is false
  tracing: Fix kmemleak in tracing_map_array_free()
  ftrace: Check for null ret_stack on profile function graph entry function
  ring-buffer: Have ring_buffer_alloc_read_page() return error on offline CPU
  tracing: Missing error code in tracer_alloc_buffers()
  tracing: Call clear_boot_tracer() at lateinit_sync
commit 415be6c256
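The ring-buffer fixes in this series switch ring_buffer_alloc_read_page() from returning NULL to returning an error pointer, so its callers below test IS_ERR()/PTR_ERR() instead of a NULL check. For readers less familiar with that convention (the real helpers live in include/linux/err.h), here is a minimal standalone sketch of the idea; the re-implemented macros and the alloc_read_page()/cpu_is_online names are simplified stand-ins for illustration, not the kernel's code.

/* Simplified userspace illustration of the ERR_PTR convention; the real
 * definitions live in include/linux/err.h. Names mirror the kernel, but
 * this is not the kernel implementation.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

/* Encode a negative errno value in the pointer itself. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

/* Recover the errno value from an error pointer. */
static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* True if the pointer is actually an encoded errno (top MAX_ERRNO values). */
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Toy allocator with the same shape as the fixed ring_buffer_alloc_read_page():
 * it fails with -ENODEV for an "offline" CPU and -ENOMEM on allocation failure. */
static void *alloc_read_page(int cpu_is_online)
{
	void *page;

	if (!cpu_is_online)
		return ERR_PTR(-ENODEV);

	page = malloc(4096);
	if (!page)
		return ERR_PTR(-ENOMEM);
	return page;
}

int main(void)
{
	void *page = alloc_read_page(0);	/* pretend the CPU is offline */

	if (IS_ERR(page)) {
		printf("allocation failed: %ld\n", PTR_ERR(page));
		return 1;
	}
	free(page);
	return 0;
}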
kernel/trace/ftrace.c
@@ -889,6 +889,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
 
 	function_profile_call(trace->func, 0, NULL, NULL);
 
+	/* If function graph is shutting down, ret_stack can be NULL */
+	if (!current->ret_stack)
+		return 0;
+
 	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
 		current->ret_stack[index].subtime = 0;
 
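The hunk above guards the profiling hook against current->ret_stack being NULL while the function graph tracer is shutting down. A minimal standalone sketch of that guard follows; the task/ret_entry/profile_entry names are hypothetical, and it only shows the NULL check itself, not the teardown ordering the kernel relies on to make the check sufficient.

/* Standalone sketch (hypothetical names, not kernel code) of the pattern in
 * the profile_graph_entry() fix: a profiling hook must tolerate the per-task
 * return stack having been torn down (pointer set to NULL).
 */
#include <stddef.h>
#include <stdio.h>

#define RETFUNC_DEPTH 50

struct task {
	struct ret_entry {
		unsigned long long subtime;
	} *ret_stack;		/* NULL once graph tracing shuts down */
};

static int profile_entry(struct task *current_task, int index)
{
	/* If the tracer is shutting down, ret_stack can be NULL. */
	if (!current_task->ret_stack)
		return 0;

	if (index >= 0 && index < RETFUNC_DEPTH)
		current_task->ret_stack[index].subtime = 0;

	return 1;
}

int main(void)
{
	struct task t = { .ret_stack = NULL };	/* simulate tracer shutdown */

	printf("profiled: %d\n", profile_entry(&t, 3));	/* prints 0, no crash */
	return 0;
}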
kernel/trace/ring_buffer.c
@@ -4386,15 +4386,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
  * the page that was allocated, with the read page of the buffer.
  *
  * Returns:
- * The page allocated, or NULL on error.
+ * The page allocated, or ERR_PTR
  */
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 {
-	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_data_page *bpage = NULL;
 	unsigned long flags;
 	struct page *page;
 
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return ERR_PTR(-ENODEV);
+
+	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	arch_spin_lock(&cpu_buffer->lock);
 
@@ -4412,7 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 	page = alloc_pages_node(cpu_to_node(cpu),
 				GFP_KERNEL | __GFP_NORETRY, 0);
 	if (!page)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	bpage = page_address(page);
 
@@ -4467,8 +4471,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
  *
  * for example:
  *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
- *	if (!rpage)
- *		return error;
+ *	if (IS_ERR(rpage))
+ *		return PTR_ERR(rpage);
  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
  *	if (ret >= 0)
  *		process_page(rpage, ret);
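The updated kernel-doc above spells out the new caller contract: test the returned page with IS_ERR() rather than a NULL check. The conversions of tracing_buffers_read() and tracing_buffers_splice_read() further down additionally reset the cached pointer to NULL after extracting the error, so that later NULL checks keep working. Below is a standalone sketch of that caller-side pattern, with simplified stand-in helpers and a hypothetical buffer_info struct, assuming the same ERR_PTR encoding shown earlier; it is an illustration, not the kernel code.

/* Caller-side pattern: capture PTR_ERR() and clear the cached pointer. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long e) { return (void *)e; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct buffer_info {
	void *spare;		/* cached read page, NULL until allocated */
};

/* Stand-in for ring_buffer_alloc_read_page(); fails for an "offline" CPU. */
static void *alloc_read_page(int cpu_is_online)
{
	return cpu_is_online ? malloc(4096) : ERR_PTR(-ENODEV);
}

static long buffers_read(struct buffer_info *info, int cpu_is_online)
{
	long ret = 0;

	if (!info->spare) {
		info->spare = alloc_read_page(cpu_is_online);
		if (IS_ERR(info->spare)) {
			ret = PTR_ERR(info->spare);
			info->spare = NULL;	/* keep later NULL checks valid */
		}
	}
	if (!info->spare)
		return ret;

	/* ... read into info->spare ... */
	return 4096;
}

int main(void)
{
	struct buffer_info info = { .spare = NULL };

	printf("offline cpu: %ld\n", buffers_read(&info, 0));	/* -ENODEV */
	printf("online cpu:  %ld\n", buffers_read(&info, 1));	/* 4096 */
	free(info.spare);
	return 0;
}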
kernel/trace/ring_buffer_benchmark.c
@@ -113,7 +113,7 @@ static enum event_status read_page(int cpu)
 	int i;
 
 	bpage = ring_buffer_alloc_read_page(buffer, cpu);
-	if (!bpage)
+	if (IS_ERR(bpage))
 		return EVENT_DROPPED;
 
 	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
kernel/trace/trace.c
@@ -6598,7 +6598,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 {
 	struct ftrace_buffer_info *info = filp->private_data;
 	struct trace_iterator *iter = &info->iter;
-	ssize_t ret;
+	ssize_t ret = 0;
 	ssize_t size;
 
 	if (!count)
@@ -6612,10 +6612,15 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (!info->spare) {
 		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
 							  iter->cpu_file);
-		info->spare_cpu = iter->cpu_file;
+		if (IS_ERR(info->spare)) {
+			ret = PTR_ERR(info->spare);
+			info->spare = NULL;
+		} else {
+			info->spare_cpu = iter->cpu_file;
+		}
 	}
 	if (!info->spare)
-		return -ENOMEM;
+		return ret;
 
 	/* Do we have previous read data to read? */
 	if (info->read < PAGE_SIZE)
@@ -6790,8 +6795,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		ref->ref = 1;
 		ref->buffer = iter->trace_buffer->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
-		if (!ref->page) {
-			ret = -ENOMEM;
+		if (IS_ERR(ref->page)) {
+			ret = PTR_ERR(ref->page);
+			ref->page = NULL;
 			kfree(ref);
 			break;
 		}
@@ -8293,6 +8299,7 @@ __init static int tracer_alloc_buffers(void)
 	if (ret < 0)
 		goto out_free_cpumask;
 	/* Used for event triggers */
+	ret = -ENOMEM;
 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
 	if (!temp_buffer)
 		goto out_rm_hp_state;
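The one-line hunk above is the "missed return error code" item from the pull request: tracer_alloc_buffers() jumps to a shared error label, so the error code has to be set before the allocation that can fail, otherwise a stale 0 (success) is returned on the failure path. A standalone sketch of that goto-cleanup idiom follows; alloc_buffers(), some_earlier_init() and the simulate_failure flag are hypothetical illustration names, not kernel code.

/* Sketch of the bug class: a shared goto error path returning a stale ret. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for ring_buffer_alloc(); fails on demand. */
static void *ring_buffer_alloc(size_t size, int simulate_failure)
{
	return simulate_failure ? NULL : malloc(size);
}

static int some_earlier_init(void)
{
	return 0;	/* succeeds, leaving ret == 0 behind */
}

static int alloc_buffers(int simulate_failure)
{
	void *temp_buffer;
	int ret;

	ret = some_earlier_init();
	if (ret < 0)
		goto out;

	/* Used for event triggers */
	ret = -ENOMEM;	/* the one-line fix: set the code before the allocation */
	temp_buffer = ring_buffer_alloc(4096, simulate_failure);
	if (!temp_buffer)
		goto out;	/* without the line above, this would return 0 */

	free(temp_buffer);	/* the real function keeps the buffer; freed here only to keep the sketch leak-free */
	return 0;
out:
	return ret;
}

int main(void)
{
	printf("failure path: %d\n", alloc_buffers(1));	/* -ENOMEM, not 0 */
	printf("success path: %d\n", alloc_buffers(0));	/* 0 */
	return 0;
}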
@@ -8407,4 +8414,4 @@ __init static int clear_boot_tracer(void)
 }
 
 fs_initcall(tracer_init_tracefs);
-late_initcall(clear_boot_tracer);
+late_initcall_sync(clear_boot_tracer);
kernel/trace/trace_events_filter.c
@@ -1959,6 +1959,10 @@ static int create_filter(struct trace_event_call *call,
 		if (err && set_str)
 			append_filter_err(ps, filter);
 	}
+	if (err && !set_str) {
+		free_event_filter(filter);
+		filter = NULL;
+	}
 	create_filter_finish(ps);
 
 	*filterp = filter;
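The create_filter() hunk above fixes one of the two leaks mentioned in the pull request: when the caller did not ask for the error string (set_str is false), a filter that failed to parse was handed back anyway and never freed. A standalone sketch of the ownership rule follows; the filter struct and the fake parsing test are invented for illustration and do not reflect the real event-filter code.

/* Ownership sketch: on failure, free the half-built object unless the caller
 * explicitly asked to keep it (here: for its error string). */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct filter {
	const char *err_string;	/* set only when the caller asked for it */
};

static void free_filter(struct filter *f)
{
	free(f);
}

static int create_filter(const char *str, int set_str, struct filter **filterp)
{
	struct filter *filter = calloc(1, sizeof(*filter));
	int err = 0;

	if (!filter)
		return -ENOMEM;

	if (strcmp(str, "valid") != 0) {	/* pretend the parse failed */
		err = -EINVAL;
		if (set_str)
			filter->err_string = "syntax error";
	}
	if (err && !set_str) {	/* the fix: don't leak the half-built filter */
		free_filter(filter);
		filter = NULL;
	}

	*filterp = filter;	/* NULL on error unless the caller wanted the message */
	return err;
}

int main(void)
{
	struct filter *f;
	int err = create_filter("bogus", 0, &f);

	printf("err=%d filter=%p\n", err, (void *)f);	/* f is NULL, nothing leaked */
	free_filter(f);
	return 0;
}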
kernel/trace/tracing_map.c
@@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a)
 	if (!a)
 		return;
 
-	if (!a->pages) {
-		kfree(a);
-		return;
-	}
+	if (!a->pages)
+		goto free;
+
 	for (i = 0; i < a->n_pages; i++) {
 		if (!a->pages[i])
 			break;
 		free_page((unsigned long)a->pages[i]);
 	}
+
+	kfree(a->pages);
+
+ free:
 	kfree(a);
 }
 
 struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
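This last hunk addresses the other leak: tracing_map_array_free() released every page and the struct itself, but never the array of page pointers, and the new label lets the "no pages allocated" path share the final kfree(). A standalone userspace sketch of the same shape follows; map_array and its helpers are hypothetical names, not the kernel's tracing_map code.

/* Free the elements, then the pointer array (previously leaked), then the struct. */
#include <stdlib.h>

struct map_array {
	unsigned int n_pages;
	void **pages;		/* array of per-page allocations */
};

static void map_array_free(struct map_array *a)
{
	unsigned int i;

	if (!a)
		return;

	if (!a->pages)
		goto free;

	for (i = 0; i < a->n_pages; i++) {
		if (!a->pages[i])
			break;
		free(a->pages[i]);
	}

	free(a->pages);		/* the allocation the original code leaked */

free:
	free(a);
}

int main(void)
{
	struct map_array *a = calloc(1, sizeof(*a));

	if (!a)
		return 1;
	a->n_pages = 4;
	a->pages = calloc(a->n_pages, sizeof(*a->pages));
	if (a->pages)
		a->pages[0] = malloc(4096);
	map_array_free(a);	/* frees pages[0], the pages array, and a */
	return 0;
}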