Merge tag 'trace-v5.1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes and cleanups from Steven Rostedt:
 "This contains a series of last minute clean ups, small fixes and
  error checks"

* tag 'trace-v5.1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing/probe: Verify alloc_trace_*probe() result
  tracing/probe: Check event/group naming rule at parsing
  tracing/probe: Check the size of argument name and body
  tracing/probe: Check event name length correctly
  tracing/probe: Check maxactive error cases
  tracing: kdb: Fix ftdump to not sleep
  trace/probes: Remove kernel doc style from non kernel doc comment
  tracing/probes: Make reserved_field_names static
commit aa2e3ac64a
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -128,7 +128,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 			    unsigned long *lost_events);
 
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
 void ring_buffer_read_prepare_sync(void);
 void ring_buffer_read_start(struct ring_buffer_iter *iter);
 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4191,6 +4191,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
  * @buffer: The ring buffer to read from
  * @cpu: The cpu buffer to iterate over
+ * @flags: gfp flags to use for memory allocation
  *
  * This performs the initial preparations necessary to iterate
  * through the buffer. Memory is allocated, buffer recording
@@ -4208,7 +4209,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
  * This overall must be paired with ring_buffer_read_finish.
  */
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_iter *iter;
@@ -4216,7 +4217,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
-	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+	iter = kmalloc(sizeof(*iter), flags);
 	if (!iter)
 		return NULL;
 
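A note on the API change above: the gfp flags passed to kmalloc() decide whether the allocation may sleep. GFP_KERNEL allocations can block while the kernel reclaims memory; GFP_ATOMIC allocations never sleep but fail more readily. Threading a gfp_t parameter through ring_buffer_read_prepare() moves that decision to the caller, which knows its own context. A minimal userspace sketch of the same pattern follows; the names (iter_prepare, the function-pointer allocator) are illustrative, not kernel API.

#include <stdio.h>
#include <stdlib.h>

struct iter {
	int cpu;
};

/* Analogue of taking a gfp_t: the caller, not this helper, decides
 * how the allocation is allowed to behave. */
static struct iter *iter_prepare(int cpu, void *(*alloc)(size_t))
{
	struct iter *it = alloc(sizeof(*it));

	if (!it)
		return NULL;
	it->cpu = cpu;
	return it;
}

int main(void)
{
	/* A caller that may block passes malloc; a caller in a
	 * restricted context would pass a non-blocking allocator. */
	struct iter *it = iter_prepare(0, malloc);

	if (!it)
		return 1;
	printf("prepared iterator for cpu %d\n", it->cpu);
	free(it);
	return 0;
}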
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4079,7 +4079,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
 			iter->buffer_iter[cpu] =
-				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+				ring_buffer_read_prepare(iter->trace_buffer->buffer,
+							 cpu, GFP_KERNEL);
 		}
 		ring_buffer_read_prepare_sync();
 		for_each_tracing_cpu(cpu) {
@@ -4089,7 +4090,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
-			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+			ring_buffer_read_prepare(iter->trace_buffer->buffer,
+						 cpu, GFP_KERNEL);
 		ring_buffer_read_prepare_sync();
 		ring_buffer_read_start(iter->buffer_iter[cpu]);
 		tracing_iter_reset(iter, cpu);
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -51,14 +51,16 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
 	if (cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
 			iter.buffer_iter[cpu] =
-			ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
+			ring_buffer_read_prepare(iter.trace_buffer->buffer,
+						 cpu, GFP_ATOMIC);
 			ring_buffer_read_start(iter.buffer_iter[cpu]);
 			tracing_iter_reset(&iter, cpu);
 		}
 	} else {
 		iter.cpu_file = cpu_file;
 		iter.buffer_iter[cpu_file] =
-		ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
+		ring_buffer_read_prepare(iter.trace_buffer->buffer,
+					 cpu_file, GFP_ATOMIC);
 		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
 		tracing_iter_reset(&iter, cpu_file);
 	}
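This kdb hunk is the point of the "tracing: kdb: Fix ftdump to not sleep" change: kdb's ftdump runs with the machine stopped and interrupts disabled, so a GFP_KERNEL allocation, which is allowed to sleep waiting for reclaim, could hang the debugger. GFP_ATOMIC never sleeps, at the cost of failing more easily under memory pressure, an acceptable trade-off inside a debugger. The __tracing_open() call sites above keep GFP_KERNEL because opening a trace file happens in ordinary process context.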
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -35,7 +35,7 @@ static struct dyn_event_operations trace_kprobe_ops = {
 	.match = trace_kprobe_match,
 };
 
-/**
+/*
  * Kprobe event core functions
  */
 struct trace_kprobe {
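Background for the comment change: a comment opening with /** is claimed by kernel-doc, and scripts/kernel-doc expects it to document the symbol that follows, so a free-form banner like "Kprobe event core functions" triggers warnings. Demoting it to a plain /* comment fixes that. A minimal illustration (add_one is a made-up example, not kernel code):

/* Plain comment: free-form text, ignored by kernel-doc. */

/**
 * add_one - add one to a value
 * @x: the value to increment
 *
 * A kernel-doc comment: the leading double-star marks it for
 * extraction, so it must follow the name/@param/description layout.
 *
 * Return: x + 1
 */
static inline int add_one(int x)
{
	return x + 1;
}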
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -221,7 +221,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 
 	tk->rp.maxactive = maxactive;
 
-	if (!event || !is_good_name(event)) {
+	if (!event || !group) {
 		ret = -EINVAL;
 		goto error;
 	}
@@ -231,11 +231,6 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 	if (!tk->tp.call.name)
 		goto error;
 
-	if (!group || !is_good_name(group)) {
-		ret = -EINVAL;
-		goto error;
-	}
-
 	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
 	if (!tk->tp.class.system)
 		goto error;
@@ -624,7 +619,11 @@ static int trace_kprobe_create(int argc, const char *argv[])
 	if (event)
 		event++;
 
-	if (is_return && isdigit(argv[0][1])) {
+	if (isdigit(argv[0][1])) {
+		if (!is_return) {
+			pr_info("Maxactive is not for kprobe");
+			return -EINVAL;
+		}
 		if (event)
 			len = event - &argv[0][1] - 1;
 		else
@@ -634,8 +633,8 @@ static int trace_kprobe_create(int argc, const char *argv[])
 		memcpy(buf, &argv[0][1], len);
 		buf[len] = '\0';
 		ret = kstrtouint(buf, 0, &maxactive);
-		if (ret) {
-			pr_info("Failed to parse maxactive.\n");
+		if (ret || !maxactive) {
+			pr_info("Invalid maxactive number\n");
 			return ret;
 		}
 		/* kretprobes instances are iterated over via a list. The
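The strengthened check above rejects a parsed-but-meaningless maxactive of zero in addition to parse failures. A simplified userspace analogue of the parse step follows; parse_maxactive and the 4096 cap are assumptions of this sketch (the kernel caps kretprobe maxactive at KRETPROBE_MAXACTIVE_MAX):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse the "<maxactive>" digits from a definition such as
 * "r100:myevent symbol", rejecting garbage, zero, and huge values. */
static int parse_maxactive(const char *buf, unsigned int *maxactive)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(buf, &end, 0);
	if (errno || end == buf || *end != '\0' || val == 0 || val > 4096)
		return -EINVAL;
	*maxactive = (unsigned int)val;
	return 0;
}

int main(void)
{
	unsigned int m;

	printf("\"100\" -> %d\n", parse_maxactive("100", &m)); /* 0 */
	printf("\"0\"   -> %d\n", parse_maxactive("0", &m));   /* -EINVAL */
	printf("\"9x\"  -> %d\n", parse_maxactive("9x", &m));  /* -EINVAL */
	return 0;
}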
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -694,9 +693,9 @@ static int trace_kprobe_create(int argc, const char *argv[])
 	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
 			       argc, is_return);
 	if (IS_ERR(tk)) {
-		pr_info("Failed to allocate trace_probe.(%d)\n",
-			(int)PTR_ERR(tk));
 		ret = PTR_ERR(tk);
+		/* This must return -ENOMEM otherwise there is a bug */
+		WARN_ON_ONCE(ret != -ENOMEM);
 		goto out;
 	}
 
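This hunk implements "tracing/probe: Verify alloc_trace_*probe() result". With name validation moved to parse time (see the trace_probe.c hunks below), the only legitimate failure left in alloc_trace_kprobe() is memory exhaustion, so the caller replaces the log message with an assertion of that contract; any other error code now indicates a bug. A userspace sketch of the same idea, with probe_alloc as a hypothetical stand-in:

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct probe {
	char *name;
};

/* Inputs were validated by the parser, so the only failure mode
 * left here is allocation failure. */
static struct probe *probe_alloc(const char *validated_name)
{
	struct probe *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->name = strdup(validated_name);
	if (!p->name) {
		free(p);
		return NULL;
	}
	return p;
}

int main(void)
{
	struct probe *p = probe_alloc("myevent");

	/* Analogue of WARN_ON_ONCE(ret != -ENOMEM): failure for any
	 * reason other than memory means the contract was broken. */
	assert(p || errno == ENOMEM);
	if (p) {
		free(p->name);
		free(p);
	}
	return 0;
}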
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -13,7 +13,7 @@
 
 #include "trace_probe.h"
 
-const char *reserved_field_names[] = {
+static const char *reserved_field_names[] = {
 	"common_type",
 	"common_flags",
 	"common_preempt_count",
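reserved_field_names is referenced only within trace_probe.c, so marking it static gives it internal linkage: it disappears from the kernel's global symbol table, cannot clash with a same-named symbol elsewhere, and the compiler can warn if it ever becomes unused.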
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -159,6 +159,7 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
 				char *buf)
 {
 	const char *slash, *event = *pevent;
+	int len;
 
 	slash = strchr(event, '/');
 	if (slash) {
@@ -171,12 +172,25 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
 			return -E2BIG;
 		}
 		strlcpy(buf, event, slash - event + 1);
+		if (!is_good_name(buf)) {
+			pr_info("Group name must follow the same rules as C identifiers\n");
+			return -EINVAL;
+		}
 		*pgroup = buf;
 		*pevent = slash + 1;
+		event = *pevent;
 	}
-	if (strlen(event) == 0) {
+	len = strlen(event);
+	if (len == 0) {
 		pr_info("Event name is not specified\n");
 		return -EINVAL;
+	} else if (len > MAX_EVENT_NAME_LEN) {
+		pr_info("Event name is too long\n");
+		return -E2BIG;
+	}
+	if (!is_good_name(event)) {
+		pr_info("Event name must follow the same rules as C identifiers\n");
+		return -EINVAL;
 	}
 	return 0;
 }
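The hunks above enforce, at parse time, that group and event names are non-empty, within MAX_EVENT_NAME_LEN, and valid C identifiers, which matters because they become directory names under tracefs. The sketch below models the kernel's is_good_name() helper and the new length checks as standalone C; check_event_name is a simplified composite of this patch's checks, not a kernel function:

#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_EVENT_NAME_LEN 64

/* Modeled on the kernel's is_good_name(): accept only
 * [A-Za-z_][A-Za-z0-9_]*, i.e. a C identifier. */
static bool is_good_name(const char *name)
{
	if (!isalpha((unsigned char)*name) && *name != '_')
		return false;
	while (*++name != '\0') {
		if (!isalnum((unsigned char)*name) && *name != '_')
			return false;
	}
	return true;
}

/* Composite of the checks the patch adds for the event part. */
static int check_event_name(const char *event)
{
	size_t len = strlen(event);

	if (len == 0)
		return -EINVAL;
	if (len > MAX_EVENT_NAME_LEN)
		return -E2BIG;
	if (!is_good_name(event))
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", check_event_name("my_event")); /* 0 */
	printf("%d\n", check_event_name("9bad"));     /* -EINVAL */
	printf("%d\n", check_event_name(""));         /* -EINVAL */
	return 0;
}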
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -548,6 +562,8 @@ int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, char *arg,
 
 	body = strchr(arg, '=');
 	if (body) {
+		if (body - arg > MAX_ARG_NAME_LEN || body == arg)
+			return -EINVAL;
 		parg->name = kmemdup_nul(arg, body - arg, GFP_KERNEL);
 		body++;
 	} else {
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -32,6 +32,7 @@
 #define MAX_TRACE_ARGS		128
 #define MAX_ARGSTR_LEN		63
 #define MAX_ARRAY_LEN		64
+#define MAX_ARG_NAME_LEN	32
 #define MAX_STRING_SIZE		PATH_MAX
 
 /* Reserved field names */
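Together, the last two hunks bound the user-controlled argument name: the parser splits each argument at '=' into name and body, and now rejects an empty name or one longer than the new MAX_ARG_NAME_LEN before duplicating it. A simplified standalone version of that check (check_arg_name is this sketch's name, not the kernel's):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_ARG_NAME_LEN 32

/* Split "name=body" at '=' and validate the name's length before
 * any memory would be duplicated for it. */
static int check_arg_name(const char *arg)
{
	const char *body = strchr(arg, '=');

	if (!body)
		return 0; /* no explicit name; the caller generates one */
	if (body - arg > MAX_ARG_NAME_LEN || body == arg)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", check_arg_name("ip=%di"));  /* 0 */
	printf("%d\n", check_arg_name("=%di"));    /* -EINVAL */
	return 0;
}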
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -273,10 +273,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 {
 	struct trace_uprobe *tu;
 
-	if (!event || !is_good_name(event))
-		return ERR_PTR(-EINVAL);
-
-	if (!group || !is_good_name(group))
+	if (!event || !group)
 		return ERR_PTR(-EINVAL);
 
 	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
@@ -524,8 +521,9 @@ static int trace_uprobe_create(int argc, const char **argv)
 
 	tu = alloc_trace_uprobe(group, event, argc, is_return);
 	if (IS_ERR(tu)) {
-		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
 		ret = PTR_ERR(tu);
+		/* This must return -ENOMEM otherwise there is a bug */
+		WARN_ON_ONCE(ret != -ENOMEM);
 		goto fail_address_parse;
 	}
 	tu->offset = offset;