// SPDX-License-Identifier: GPL-2.0-only
/*
 * builtin-ftrace.c
 *
 * Copyright (c) 2013 LG Electronics, Namhyung Kim <namhyung@kernel.org>
 */

#include "builtin.h"

#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <stdlib.h>
#include <fcntl.h>
#include <poll.h>
#include <linux/capability.h>
#include <linux/string.h>

#include "debug.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <api/fs/tracing_path.h>
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"
#include "util/cap.h"
#include "util/config.h"
#include "util/units.h"
#include "util/parse-sublevel-options.h"

#define DEFAULT_TRACER  "function_graph"
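
/*
 * Options and state for one 'perf ftrace' session, filled in from the
 * command line and the ftrace.* perfconfig variables.
 */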
struct perf_ftrace {
	struct evlist		*evlist;
	struct target		target;
	const char		*tracer;
	bool			list_avail_functions;
	struct list_head	filters;
	struct list_head	notrace;
	struct list_head	graph_funcs;
	struct list_head	nograph_funcs;
	int			graph_depth;
	unsigned long		percpu_buffer_size;
	bool			inherit;
	int			func_stack_trace;
	int			func_irq_info;
	int			graph_nosleep_time;
	int			graph_noirqs;
	int			graph_verbose;
	int			graph_thresh;
	unsigned int		initial_delay;
};
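
/* A single function name (or glob) kept on one of the filter lists above. */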
struct filter_entry {
	struct list_head	list;
	char			name[];
};

static volatile int workload_exec_errno;
static bool done;

static void sig_handler(int sig __maybe_unused)
{
	done = true;
}

/*
 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails,
 * since we asked for it by setting its exec_error callback to
 * ftrace__workload_exec_failed_signal below.
 *
 * XXX We need to handle this more appropriately, emitting an error, etc.
 */
static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
						siginfo_t *info __maybe_unused,
						void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = true;
}
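
/*
 * Write 'val' to the tracefs file 'name', either truncating or appending.
 * A '\n' is appended to the value so that the kernel reports errors for
 * invalid values instead of silently ignoring them.
 */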
static int __write_tracing_file(const char *name, const char *val, bool append)
{
	char *file;
	int fd, ret = -1;
	ssize_t size = strlen(val);
	int flags = O_WRONLY;
	char errbuf[512];
	char *val_copy;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	if (append)
		flags |= O_APPEND;
	else
		flags |= O_TRUNC;

	fd = open(file, flags);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out;
	}

	/*
	 * Copy the original value and append a '\n'. Without this,
	 * the kernel can hide possible errors.
	 */
	val_copy = strdup(val);
	if (!val_copy)
		goto out_close;
	val_copy[size] = '\n';

	if (write(fd, val_copy, size + 1) == size + 1)
		ret = 0;
	else
		pr_debug("write '%s' to tracing/%s failed: %s\n",
			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));

	free(val_copy);
out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}

static int write_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, false);
}

static int append_tracing_file(const char *name, const char *val)
{
	return __write_tracing_file(name, val, true);
}
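
/* Dump a tracefs file to stdout, e.g. 'available_filter_functions' for -F. */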
static int read_tracing_file_to_stdout(const char *name)
{
	char buf[4096];
	char *file;
	int fd;
	int ret = -1;

	file = get_tracing_file(name);
	if (!file) {
		pr_debug("cannot get tracing file: %s\n", name);
		return -1;
	}

	fd = open(file, O_RDONLY);
	if (fd < 0) {
		pr_debug("cannot open tracing file: %s: %s\n",
			 name, str_error_r(errno, buf, sizeof(buf)));
		goto out;
	}

	/* read contents to stdout */
	while (true) {
		int n = read(fd, buf, sizeof(buf));
		if (n == 0)
			break;
		else if (n < 0)
			goto out_close;

		if (fwrite(buf, n, 1, stdout) != 1)
			goto out_close;
	}
	ret = 0;

out_close:
	close(fd);
out:
	put_tracing_file(file);
	return ret;
}

static int write_tracing_file_int(const char *name, int value)
{
	char buf[16];

	snprintf(buf, sizeof(buf), "%d", value);
	if (write_tracing_file(name, buf) < 0)
		return -1;

	return 0;
}
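
/* Set a tracer option under tracefs options/, e.g. options/function-fork. */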
static int write_tracing_option_file(const char *name, const char *val)
{
	char *file;
	int ret;

	if (asprintf(&file, "options/%s", name) < 0)
		return -1;

	ret = __write_tracing_file(file, val, false);
	free(file);
	return ret;
}

static int reset_tracing_cpu(void);
static void reset_tracing_filters(void);

static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
{
	write_tracing_option_file("function-fork", "0");
	write_tracing_option_file("func_stack_trace", "0");
	write_tracing_option_file("sleep-time", "1");
	write_tracing_option_file("funcgraph-irqs", "1");
	write_tracing_option_file("funcgraph-proc", "0");
	write_tracing_option_file("funcgraph-abstime", "0");
	write_tracing_option_file("latency-format", "0");
	write_tracing_option_file("irq-info", "0");
}
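
/*
 * Put tracing back into a known default state: tracer off, 'nop' tracer,
 * no pid/cpu restriction, no depth or threshold limit, and all filters
 * and options reset.
 */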
static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
{
	if (write_tracing_file("tracing_on", "0") < 0)
		return -1;

	if (write_tracing_file("current_tracer", "nop") < 0)
		return -1;

	if (write_tracing_file("set_ftrace_pid", " ") < 0)
		return -1;

	if (reset_tracing_cpu() < 0)
		return -1;

	if (write_tracing_file("max_graph_depth", "0") < 0)
		return -1;

	if (write_tracing_file("tracing_thresh", "0") < 0)
		return -1;

	reset_tracing_filters();
	reset_tracing_options(ftrace);
	return 0;
}
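
/* Restrict tracing to the target tasks by appending each tid to set_ftrace_pid. */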
static int set_tracing_pid(struct perf_ftrace *ftrace)
{
	int i;
	char buf[16];

	if (target__has_cpu(&ftrace->target))
		return 0;

	for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
		scnprintf(buf, sizeof(buf), "%d",
			  ftrace->evlist->core.threads->map[i]);
		if (append_tracing_file("set_ftrace_pid", buf) < 0)
			return -1;
	}
	return 0;
}
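
/*
 * tracing_cpumask takes a hex mask with a ',' every 32 CPUs (8 hex digits),
 * e.g. "000000ff" selects CPUs 0-7.  Size the buffer for last_cpu/4 + 1
 * digits plus the separators and the terminating NUL.
 */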
static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
{
	char *cpumask;
	size_t mask_size;
	int ret;
	int last_cpu;

	last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
	mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
	mask_size += last_cpu / 32; /* a ',' is needed for every 32 cpus */

	cpumask = malloc(mask_size);
	if (cpumask == NULL) {
		pr_debug("failed to allocate cpu mask\n");
		return -1;
	}

	cpu_map__snprint_mask(cpumap, cpumask, mask_size);

	ret = write_tracing_file("tracing_cpumask", cpumask);

	free(cpumask);
	return ret;
}

static int set_tracing_cpu(struct perf_ftrace *ftrace)
{
	struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;

	if (!target__has_cpu(&ftrace->target))
		return 0;

	return set_tracing_cpumask(cpumap);
}

static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
{
	if (!ftrace->func_stack_trace)
		return 0;

	if (write_tracing_option_file("func_stack_trace", "1") < 0)
		return -1;

	return 0;
}

static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
{
	if (!ftrace->func_irq_info)
		return 0;

	if (write_tracing_option_file("irq-info", "1") < 0)
		return -1;

	return 0;
}

static int reset_tracing_cpu(void)
{
	struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
	int ret;

	ret = set_tracing_cpumask(cpumap);
	perf_cpu_map__put(cpumap);
	return ret;
}

static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
{
	struct filter_entry *pos;

	list_for_each_entry(pos, funcs, list) {
		if (append_tracing_file(filter_file, pos->name) < 0)
			return -1;
	}

	return 0;
}

static int set_tracing_filters(struct perf_ftrace *ftrace)
{
	int ret;

	ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
	if (ret < 0)
		return ret;

	ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
	if (ret < 0)
		return ret;

	/* old kernels do not have this filter */
	__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);

	return ret;
}

static void reset_tracing_filters(void)
{
	write_tracing_file("set_ftrace_filter", " ");
	write_tracing_file("set_ftrace_notrace", " ");
	write_tracing_file("set_graph_function", " ");
	write_tracing_file("set_graph_notrace", " ");
}

static int set_tracing_depth(struct perf_ftrace *ftrace)
{
	if (ftrace->graph_depth == 0)
		return 0;

	if (ftrace->graph_depth < 0) {
		pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
		return -1;
	}

	if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0)
		return -1;

	return 0;
}

static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace)
{
	int ret;

	if (ftrace->percpu_buffer_size == 0)
		return 0;

	ret = write_tracing_file_int("buffer_size_kb",
				     ftrace->percpu_buffer_size / 1024);
	if (ret < 0)
		return ret;

	return 0;
}

static int set_tracing_trace_inherit(struct perf_ftrace *ftrace)
{
	if (!ftrace->inherit)
		return 0;

	if (write_tracing_option_file("function-fork", "1") < 0)
		return -1;

	return 0;
}

static int set_tracing_sleep_time(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_nosleep_time)
		return 0;

	if (write_tracing_option_file("sleep-time", "0") < 0)
		return -1;

	return 0;
}

static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_noirqs)
		return 0;

	if (write_tracing_option_file("funcgraph-irqs", "0") < 0)
		return -1;

	return 0;
}

static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace)
{
	if (!ftrace->graph_verbose)
		return 0;

	if (write_tracing_option_file("funcgraph-proc", "1") < 0)
		return -1;

	if (write_tracing_option_file("funcgraph-abstime", "1") < 0)
		return -1;

	if (write_tracing_option_file("latency-format", "1") < 0)
		return -1;

	return 0;
}

static int set_tracing_thresh(struct perf_ftrace *ftrace)
{
	int ret;

	if (ftrace->graph_thresh == 0)
		return 0;

	ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh);
	if (ret < 0)
		return ret;

	return 0;
}
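
/*
 * Run one ftrace session: check privileges, reset tracefs to a known
 * state, apply the requested pid/cpu/filter/option settings, kick off
 * the workload and stream trace_pipe to stdout until it is done.
 */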
static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
{
	char *trace_file;
	int trace_fd;
	char buf[4096];
	struct pollfd pollfd = {
		.events = POLLIN,
	};

	if (!(perf_cap__capable(CAP_PERFMON) ||
	      perf_cap__capable(CAP_SYS_ADMIN))) {
		pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
		"users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
#else
		"root"
#endif
		);
		return -1;
	}

	signal(SIGINT, sig_handler);
	signal(SIGUSR1, sig_handler);
	signal(SIGCHLD, sig_handler);
	signal(SIGPIPE, sig_handler);

	if (ftrace->list_avail_functions)
		return read_tracing_file_to_stdout("available_filter_functions");

	if (reset_tracing_files(ftrace) < 0) {
		pr_err("failed to reset ftrace\n");
		goto out;
	}

	/* reset ftrace buffer */
	if (write_tracing_file("trace", "0") < 0)
		goto out;

	if (argc && perf_evlist__prepare_workload(ftrace->evlist,
				&ftrace->target, argv, false,
				ftrace__workload_exec_failed_signal) < 0) {
		goto out;
	}

	if (set_tracing_pid(ftrace) < 0) {
		pr_err("failed to set ftrace pid\n");
		goto out_reset;
	}

	if (set_tracing_cpu(ftrace) < 0) {
		pr_err("failed to set tracing cpumask\n");
		goto out_reset;
	}

	if (set_tracing_func_stack_trace(ftrace) < 0) {
		pr_err("failed to set tracing option func_stack_trace\n");
		goto out_reset;
	}

	if (set_tracing_func_irqinfo(ftrace) < 0) {
		pr_err("failed to set tracing option irq-info\n");
		goto out_reset;
	}

	if (set_tracing_filters(ftrace) < 0) {
		pr_err("failed to set tracing filters\n");
		goto out_reset;
	}

	if (set_tracing_depth(ftrace) < 0) {
		pr_err("failed to set graph depth\n");
		goto out_reset;
	}

	if (set_tracing_percpu_buffer_size(ftrace) < 0) {
		pr_err("failed to set tracing per-cpu buffer size\n");
		goto out_reset;
	}

	if (set_tracing_trace_inherit(ftrace) < 0) {
		pr_err("failed to set tracing option function-fork\n");
		goto out_reset;
	}

	if (set_tracing_sleep_time(ftrace) < 0) {
		pr_err("failed to set tracing option sleep-time\n");
		goto out_reset;
	}

	if (set_tracing_funcgraph_irqs(ftrace) < 0) {
		pr_err("failed to set tracing option funcgraph-irqs\n");
		goto out_reset;
	}

	if (set_tracing_funcgraph_verbose(ftrace) < 0) {
		pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n");
		goto out_reset;
	}

	if (set_tracing_thresh(ftrace) < 0) {
		pr_err("failed to set tracing thresh\n");
		goto out_reset;
	}

	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
		goto out_reset;
	}

	setup_pager();

	trace_file = get_tracing_file("trace_pipe");
	if (!trace_file) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	trace_fd = open(trace_file, O_RDONLY);

	put_tracing_file(trace_file);

	if (trace_fd < 0) {
		pr_err("failed to open trace_pipe\n");
		goto out_reset;
	}

	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
	pollfd.fd = trace_fd;
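
	/*
	 * From here on, stream the trace: print the column headers, enable
	 * tracing (optionally after --delay) and copy trace_pipe to stdout
	 * until the workload exits or the user interrupts with SIGINT.
	 */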

	/* display column headers */
	read_tracing_file_to_stdout("trace");

	if (!ftrace->initial_delay) {
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	perf_evlist__start_workload(ftrace->evlist);

	if (ftrace->initial_delay) {
		usleep(ftrace->initial_delay * 1000);
		if (write_tracing_file("tracing_on", "1") < 0) {
			pr_err("can't enable tracing\n");
			goto out_close_fd;
		}
	}

	while (!done) {
		if (poll(&pollfd, 1, -1) < 0)
			break;

		if (pollfd.revents & POLLIN) {
			int n = read(trace_fd, buf, sizeof(buf));
			if (n < 0)
				break;
			if (fwrite(buf, n, 1, stdout) != 1)
				break;
		}
	}

	write_tracing_file("tracing_on", "0");

	if (workload_exec_errno) {
		const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
		/* flush stdout first so the error message below appears at the end */
		fflush(stdout);
		pr_err("workload failed: %s\n", emsg);
		goto out_close_fd;
	}

	/* read remaining buffer contents */
	while (true) {
		int n = read(trace_fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		if (fwrite(buf, n, 1, stdout) != 1)
			break;
	}

out_close_fd:
	close(trace_fd);
out_reset:
	reset_tracing_files(ftrace);
out:
	return (done && !workload_exec_errno) ? 0 : -1;
}
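
/*
 * Handle 'ftrace.' variables from perfconfig; only 'ftrace.tracer' set to
 * "function" or "function_graph" is accepted, anything else is an error.
 */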
static int perf_ftrace_config(const char *var, const char *value, void *cb)
{
	struct perf_ftrace *ftrace = cb;

	if (!strstarts(var, "ftrace."))
		return 0;

	if (strcmp(var, "ftrace.tracer"))
		return -1;

	if (!strcmp(value, "function_graph") ||
	    !strcmp(value, "function")) {
		ftrace->tracer = value;
		return 0;
	}

	pr_err("Please select \"function_graph\" (default) or \"function\"\n");
	return -1;
}

static int parse_filter_func(const struct option *opt, const char *str,
			     int unset __maybe_unused)
{
	struct list_head *head = opt->value;
	struct filter_entry *entry;

	entry = malloc(sizeof(*entry) + strlen(str) + 1);
	if (entry == NULL)
		return -ENOMEM;

	strcpy(entry->name, str);
	list_add_tail(&entry->list, head);

	return 0;
}

static void delete_filter_func(struct list_head *head)
{
	struct filter_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del_init(&pos->list);
		free(pos);
	}
}
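
/*
 * Parse the -m/--buffer-size argument: a byte count, optionally with a
 * B/K/M/G suffix (e.g. '-m 16M').  Values below 1KB are rejected since
 * buffer_size_kb is written in kilobytes.
 */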
static int parse_buffer_size(const struct option *opt,
			     const char *str, int unset)
{
	unsigned long *s = (unsigned long *)opt->value;
	static struct parse_tag tags_size[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};
	unsigned long val;

	if (unset) {
		*s = 0;
		return 0;
	}

	val = parse_tag_value(str, tags_size);
	if (val != (unsigned long) -1) {
		if (val < 1024) {
			pr_err("buffer size too small, must be larger than 1KB.");
			return -1;
		}
		*s = val;
		return 0;
	}

	return -1;
}

static int parse_func_tracer_opts(const struct option *opt,
				  const char *str, int unset)
{
	int ret;
	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
	struct sublevel_option func_tracer_opts[] = {
		{ .name = "call-graph",	.value_ptr = &ftrace->func_stack_trace },
		{ .name = "irq-info",	.value_ptr = &ftrace->func_irq_info },
		{ .name = NULL, }
	};

	if (unset)
		return 0;

	ret = perf_parse_sublevel_options(str, func_tracer_opts);
	if (ret)
		return ret;

	return 0;
}

static int parse_graph_tracer_opts(const struct option *opt,
				   const char *str, int unset)
{
	int ret;
	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
	struct sublevel_option graph_tracer_opts[] = {
		{ .name = "nosleep-time",	.value_ptr = &ftrace->graph_nosleep_time },
		{ .name = "noirqs",		.value_ptr = &ftrace->graph_noirqs },
		{ .name = "verbose",		.value_ptr = &ftrace->graph_verbose },
		{ .name = "thresh",		.value_ptr = &ftrace->graph_thresh },
		{ .name = "depth",		.value_ptr = &ftrace->graph_depth },
		{ .name = NULL, }
	};

	if (unset)
		return 0;

	ret = perf_parse_sublevel_options(str, graph_tracer_opts);
	if (ret)
		return ret;

	return 0;
}
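
/*
 * Pick the tracer implied by the filters on the command line: -G/-g mean
 * function_graph, -T/-N mean function, otherwise keep the default (or the
 * tracer chosen with -t/perfconfig).
 */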
static void select_tracer(struct perf_ftrace *ftrace)
{
	bool graph = !list_empty(&ftrace->graph_funcs) ||
		     !list_empty(&ftrace->nograph_funcs);
	bool func = !list_empty(&ftrace->filters) ||
		    !list_empty(&ftrace->notrace);

	/* The function_graph has priority over function tracer. */
	if (graph)
		ftrace->tracer = "function_graph";
	else if (func)
		ftrace->tracer = "function";
	/* Otherwise, the default tracer is used. */

	pr_debug("%s tracer is used\n", ftrace->tracer);
}
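
/*
 * Entry point of 'perf ftrace'.  Typical invocations (illustrative):
 *
 *   perf ftrace -- ls                  # function_graph trace of 'ls'
 *   perf ftrace -T schedule -p 1234    # function tracer on an existing pid
 *   perf ftrace -G 'vfs_*' -a sleep 1  # graph trace of vfs_* on all CPUs
 */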
int cmd_ftrace(int argc, const char **argv)
{
	int ret;
	struct perf_ftrace ftrace = {
		.tracer = DEFAULT_TRACER,
		.target = { .uid = UINT_MAX, },
	};
	const char * const ftrace_usage[] = {
		"perf ftrace [<options>] [<command>]",
		"perf ftrace [<options>] -- <command> [<options>]",
		NULL
	};
	const struct option ftrace_options[] = {
	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
		   "tracer to use: function_graph(default) or function"),
	OPT_BOOLEAN('F', "funcs", &ftrace.list_avail_functions,
		    "Show available functions to filter"),
	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
		   "trace on existing process id"),
	/* TODO: Add short option -t after -t/--tracer can be removed. */
	OPT_STRING(0, "tid", &ftrace.target.tid, "tid",
		   "trace on existing thread id (exclusive to --pid)"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose"),
	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
		     "trace given functions using function tracer",
		     parse_filter_func),
	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
		     "do not trace given functions", parse_filter_func),
	OPT_CALLBACK(0, "func-opts", &ftrace, "options",
		     "function tracer options, available options: call-graph,irq-info",
		     parse_func_tracer_opts),
	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
		     "trace given functions using function_graph tracer",
		     parse_filter_func),
	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
		     "Set nograph filter on given functions", parse_filter_func),
	OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
		     "graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>",
		     parse_graph_tracer_opts),
	OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
		     "size of per cpu buffer", parse_buffer_size),
	OPT_BOOLEAN(0, "inherit", &ftrace.inherit,
		    "trace children processes"),
	OPT_UINTEGER('D', "delay", &ftrace.initial_delay,
		     "ms to wait before starting tracing after program start"),
	OPT_END()
	};

	INIT_LIST_HEAD(&ftrace.filters);
	INIT_LIST_HEAD(&ftrace.notrace);
	INIT_LIST_HEAD(&ftrace.graph_funcs);
	INIT_LIST_HEAD(&ftrace.nograph_funcs);

	ret = perf_config(perf_ftrace_config, &ftrace);
	if (ret < 0)
		return -1;

	argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&ftrace.target))
		ftrace.target.system_wide = true;

	select_tracer(&ftrace);

	ret = target__validate(&ftrace.target);
	if (ret) {
		char errbuf[512];

		target__strerror(&ftrace.target, ret, errbuf, 512);
		pr_err("%s\n", errbuf);
		goto out_delete_filters;
	}

	ftrace.evlist = evlist__new();
	if (ftrace.evlist == NULL) {
		ret = -ENOMEM;
		goto out_delete_filters;
	}

	ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
	if (ret < 0)
		goto out_delete_evlist;

	ret = __cmd_ftrace(&ftrace, argc, argv);

out_delete_evlist:
	evlist__delete(ftrace.evlist);

out_delete_filters:
	delete_filter_func(&ftrace.filters);
	delete_filter_func(&ftrace.notrace);
	delete_filter_func(&ftrace.graph_funcs);
	delete_filter_func(&ftrace.nograph_funcs);

	return ret;
}
|