2019-05-29 22:12:25 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2011-01-30 20:46:46 +08:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
|
|
|
|
*
|
|
|
|
* Parts came from builtin-{top,stat,record}.c, see those files for further
|
|
|
|
* copyright notes.
|
|
|
|
*/
|
2014-12-12 05:03:01 +08:00
|
|
|
#include <api/fs/fs.h>
|
2017-04-18 21:46:11 +08:00
|
|
|
#include <errno.h>
|
2017-04-18 02:23:08 +08:00
|
|
|
#include <inttypes.h>
|
2011-01-12 08:30:02 +08:00
|
|
|
#include <poll.h>
|
2011-01-30 20:46:46 +08:00
|
|
|
#include "cpumap.h"
|
2019-09-23 23:20:38 +08:00
|
|
|
#include "util/mmap.h"
|
2011-01-30 20:46:46 +08:00
|
|
|
#include "thread_map.h"
|
2012-04-26 13:15:22 +08:00
|
|
|
#include "target.h"
|
2011-01-12 06:56:53 +08:00
|
|
|
#include "evlist.h"
|
|
|
|
#include "evsel.h"
|
2013-08-14 20:48:24 +08:00
|
|
|
#include "debug.h"
|
2017-04-20 03:05:56 +08:00
|
|
|
#include "units.h"
|
2019-08-06 21:25:25 +08:00
|
|
|
#include <internal/lib.h> // page_size
|
2019-11-21 08:15:17 +08:00
|
|
|
#include "affinity.h"
|
2019-08-30 02:20:59 +08:00
|
|
|
#include "../perf.h"
|
2016-07-14 16:34:42 +08:00
|
|
|
#include "asm/bug.h"
|
2019-03-12 13:30:50 +08:00
|
|
|
#include "bpf-event.h"
|
2019-10-15 07:10:50 +08:00
|
|
|
#include "util/string2.h"
|
2020-05-05 22:49:08 +08:00
|
|
|
#include "util/perf_api_probe.h"
|
2017-04-20 02:49:18 +08:00
|
|
|
#include <signal.h>
|
2011-11-09 18:47:15 +08:00
|
|
|
#include <unistd.h>
|
perf evlist: Use unshare(CLONE_FS) in sb threads to let setns(CLONE_NEWNS) work
When we started using a thread to catch the PERF_RECORD_BPF_EVENT meta
data events to then ask the kernel for further info (BTF, etc) for BPF
programs shortly after they get loaded, we forgot to use
unshare(CLONE_FS) as was done in:
868a832918f6 ("perf top: Support lookup of symbols in other mount namespaces.")
Do it so that we can enter the namespaces to read the build-ids at the
end of a 'perf record' session for the DSOs that had hits.
Before:
Starting a 'stress-ng --cpus 8' inside a container and then, outside the
container running:
# perf record -a --namespaces sleep 5
# perf buildid-list | grep stress-ng
#
We would end up with a 'perf.data' file that had no entry in its
build-id table for the /usr/bin/stress-ng binary inside the container
that got tons of PERF_RECORD_SAMPLEs.
After:
# perf buildid-list | grep stress-ng
f2ed02c68341183a124b9b0f6e2e6c493c465b29 /usr/bin/stress-ng
#
Then its just a matter of making sure that that binary debuginfo package
gets available in a place that 'perf report' will look at build-id keyed
ELF files, which, in my case, on a f30 notebook, was a matter of
installing the debuginfo file for the distro used in the container,
fedora 31:
# rpm -ivh http://fedora.c3sl.ufpr.br/linux/development/31/Everything/x86_64/debug/tree/Packages/s/stress-ng-debuginfo-0.07.29-10.fc31.x86_64.rpm
Then, because perf currently looks for those debuginfo files (richer ELF
symtab) inside that namespace (look at the setns calls):
openat(AT_FDCWD, "/proc/self/ns/mnt", O_RDONLY) = 137
openat(AT_FDCWD, "/proc/13169/ns/mnt", O_RDONLY) = 139
setns(139, CLONE_NEWNS) = 0
stat("/usr/bin/stress-ng", {st_mode=S_IFREG|0755, st_size=3065416, ...}) = 0
openat(AT_FDCWD, "/usr/bin/stress-ng", O_RDONLY) = 140
fcntl(140, F_GETFD) = 0
fstat(140, {st_mode=S_IFREG|0755, st_size=3065416, ...}) = 0
mmap(NULL, 3065416, PROT_READ, MAP_PRIVATE, 140, 0) = 0x7ff2fdc5b000
munmap(0x7ff2fdc5b000, 3065416) = 0
close(140) = 0
stat("stress-ng-0.07.29-10.fc31.x86_64.debug", 0x7fff45d71260) = -1 ENOENT (No such file or directory)
stat("/usr/bin/stress-ng-0.07.29-10.fc31.x86_64.debug", 0x7fff45d71260) = -1 ENOENT (No such file or directory)
stat("/usr/bin/.debug/stress-ng-0.07.29-10.fc31.x86_64.debug", 0x7fff45d71260) = -1 ENOENT (No such file or directory)
stat("/usr/lib/debug/usr/bin/stress-ng-0.07.29-10.fc31.x86_64.debug", 0x7fff45d71260) = -1 ENOENT (No such file or directory)
stat("/root/.debug/.build-id/f2/ed02c68341183a124b9b0f6e2e6c493c465b29", 0x7fff45d711e0) = -1 ENOENT (No such file or directory)
To only then go back to the "host" namespace to look just in the users's
~/.debug cache:
setns(137, CLONE_NEWNS) = 0
chdir("/root") = 0
close(137) = 0
close(139) = 0
stat("/root/.debug/.build-id/f2/ed02c68341183a124b9b0f6e2e6c493c465b29/elf", 0x7fff45d732e0) = -1 ENOENT (No such file or directory)
It continues to fail to resolve symbols:
# perf report | grep stress-ng | head -5
9.50% stress-ng-cpu stress-ng [.] 0x0000000000021ac1
8.58% stress-ng-cpu stress-ng [.] 0x0000000000021ab4
8.51% stress-ng-cpu stress-ng [.] 0x0000000000021489
7.17% stress-ng-cpu stress-ng [.] 0x00000000000219b6
3.93% stress-ng-cpu stress-ng [.] 0x0000000000021478
#
To overcome that we use:
# perf buildid-cache -v --add /usr/lib/debug/usr/bin/stress-ng-0.07.29-10.fc31.x86_64.debug
Adding f2ed02c68341183a124b9b0f6e2e6c493c465b29 /usr/lib/debug/usr/bin/stress-ng-0.07.29-10.fc31.x86_64.debug: Ok
#
# ls -la /root/.debug/.build-id/f2/ed02c68341183a124b9b0f6e2e6c493c465b29/elf
-rw-r--r--. 3 root root 2401184 Jul 27 07:03 /root/.debug/.build-id/f2/ed02c68341183a124b9b0f6e2e6c493c465b29/elf
# file /root/.debug/.build-id/f2/ed02c68341183a124b9b0f6e2e6c493c465b29/elf
/root/.debug/.build-id/f2/ed02c68341183a124b9b0f6e2e6c493c465b29/elf: ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, interpreter \004, BuildID[sha1]=f2ed02c68341183a124b9b0f6e2e6c493c465b29, for GNU/Linux 3.2.0, with debug_info, not stripped, too many notes (256)
#
Now it finally works:
# perf report | grep stress-ng | head -5
23.59% stress-ng-cpu stress-ng [.] ackermann
23.33% stress-ng-cpu stress-ng [.] is_prime
17.36% stress-ng-cpu stress-ng [.] stress_cpu_sieve
6.08% stress-ng-cpu stress-ng [.] stress_cpu_correlate
3.55% stress-ng-cpu stress-ng [.] queens_try
#
I'll make sure that it looks for the build-id keyed files in both the
"host" namespace (the namespace the user running 'perf record' was a the
time of the recording) and in the container namespace, as it shouldn't
matter where a content based key lookup finds the ELF file to use in
resolving symbols, etc.
Reported-by: Karl Rister <krister@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Krister Johansen <kjlx@templeofstupid.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Stanislav Fomichev <sdf@google.com>
Cc: Thomas-Mich Richter <tmricht@linux.vnet.ibm.com>
Fixes: 657ee5531903 ("perf evlist: Introduce side band thread")
Link: https://lkml.kernel.org/n/tip-g79k0jz41adiaeuqud742t2l@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-08-29 03:48:50 +08:00
|
|
|
#include <sched.h>
|
2019-08-31 01:45:20 +08:00
|
|
|
#include <stdlib.h>
|
2011-01-12 06:56:53 +08:00
|
|
|
|
2011-11-04 19:10:59 +08:00
|
|
|
#include "parse-events.h"
|
2015-12-15 23:39:39 +08:00
|
|
|
#include <subcmd/parse-options.h>
|
2011-11-04 19:10:59 +08:00
|
|
|
|
2018-01-23 03:42:16 +08:00
|
|
|
#include <fcntl.h>
|
2017-04-20 06:03:14 +08:00
|
|
|
#include <sys/ioctl.h>
|
2011-01-30 20:46:46 +08:00
|
|
|
#include <sys/mman.h>
|
|
|
|
|
2011-01-13 08:39:13 +08:00
|
|
|
#include <linux/bitops.h>
|
|
|
|
#include <linux/hash.h>
|
2014-12-16 03:04:11 +08:00
|
|
|
#include <linux/log2.h>
|
2015-09-07 16:38:06 +08:00
|
|
|
#include <linux/err.h>
|
2019-08-30 03:18:59 +08:00
|
|
|
#include <linux/string.h>
|
2019-07-04 22:32:27 +08:00
|
|
|
#include <linux/zalloc.h>
|
libperf: Add perf_evlist__init() function
Add the perf_evlist__init() function to initialize a perf_evlist struct.
Committer testing:
Fix a change in init ordering that was causing this backtrace:
(gdb) run stat sleep 1
Starting program: /root/bin/perf stat sleep 1
Program received signal SIGSEGV, Segmentation fault.
0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
161 if (!evsel->own_cpus || evlist->has_user_cpus) {
Missing separate debuginfos, use: dnf debuginfo-install bzip2-libs-1.0.6-29.fc30.x86_64 elfutils-libelf-0.176-3.fc30.x86_64 elfutils-libs-0.176-3.fc30.x86_64 glib2-2.60.4-1.fc30.x86_64 libbabeltrace-1.5.6-2.fc30.x86_64 libgcc-9.1.1-1.fc30.x86_64 libunwind-1.3.1-2.fc30.x86_64 libuuid-2.33.2-1.fc30.x86_64 libxcrypt-4.4.6-2.fc30.x86_64 libzstd-1.4.0-1.fc30.x86_64 numactl-libs-2.0.12-2.fc30.x86_64 pcre-8.43-2.fc30.x86_64 perl-libs-5.28.2-436.fc30.x86_64 popt-1.16-17.fc30.x86_64 python2-libs-2.7.16-2.fc30.x86_64 slang-2.3.2-5.fc30.x86_64 xz-libs-5.2.4-5.fc30.x86_64 zlib-1.2.11-15.fc30.x86_64
(gdb) bt
#0 0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
#1 0x00000000004f6c7a in perf_evlist__propagate_maps (evlist=0xbb34c0) at util/evlist.c:178
#2 0x00000000004f955e in perf_evlist__set_maps (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:1128
#3 0x00000000004f66f8 in evlist__init (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:52
#4 0x00000000004f6790 in evlist__new () at util/evlist.c:64
#5 0x0000000000456071 in cmd_stat (argc=3, argv=0x7fffffffd670) at builtin-stat.c:1705
#6 0x00000000004dd0fa in run_builtin (p=0xa21e00 <commands+288>, argc=3, argv=0x7fffffffd670) at perf.c:304
#7 0x00000000004dd367 in handle_internal_command (argc=3, argv=0x7fffffffd670) at perf.c:356
#8 0x00000000004dd4ae in run_argv (argcp=0x7fffffffd4cc, argv=0x7fffffffd4c0) at perf.c:400
#9 0x00000000004dd81a in main (argc=3, argv=0x7fffffffd670) at perf.c:522
(gdb) bt
So move the initialization of the core evlist (calling
perf_evlist__init()) to before perf_evlist__set_maps() in
evlist__init().
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190721112506.12306-39-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-07-21 19:24:25 +08:00
|
|
|
#include <perf/evlist.h>
|
2019-07-21 19:24:50 +08:00
|
|
|
#include <perf/evsel.h>
|
2019-07-21 19:24:30 +08:00
|
|
|
#include <perf/cpumap.h>
|
2019-10-07 20:53:17 +08:00
|
|
|
#include <perf/mmap.h>
|
2011-01-13 08:39:13 +08:00
|
|
|
|
2019-08-22 00:58:12 +08:00
|
|
|
#include <internal/xyarray.h>
|
|
|
|
|
2018-12-12 02:48:47 +08:00
|
|
|
#ifdef LACKS_SIGQUEUE_PROTOTYPE
|
|
|
|
int sigqueue(pid_t pid, int sig, const union sigval value);
|
|
|
|
#endif
|
|
|
|
|
2019-07-21 19:24:45 +08:00
|
|
|
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
|
2019-09-03 04:04:12 +08:00
|
|
|
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
|
2011-01-30 20:46:46 +08:00
|
|
|
|
2019-07-21 19:23:54 +08:00
|
|
|
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
|
|
|
|
struct perf_thread_map *threads)
|
2011-01-19 07:41:45 +08:00
|
|
|
{
|
libperf: Add perf_evlist__init() function
Add the perf_evlist__init() function to initialize a perf_evlist struct.
Committer testing:
Fix a change in init ordering that was causing this backtrace:
(gdb) run stat sleep 1
Starting program: /root/bin/perf stat sleep 1
Program received signal SIGSEGV, Segmentation fault.
0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
161 if (!evsel->own_cpus || evlist->has_user_cpus) {
Missing separate debuginfos, use: dnf debuginfo-install bzip2-libs-1.0.6-29.fc30.x86_64 elfutils-libelf-0.176-3.fc30.x86_64 elfutils-libs-0.176-3.fc30.x86_64 glib2-2.60.4-1.fc30.x86_64 libbabeltrace-1.5.6-2.fc30.x86_64 libgcc-9.1.1-1.fc30.x86_64 libunwind-1.3.1-2.fc30.x86_64 libuuid-2.33.2-1.fc30.x86_64 libxcrypt-4.4.6-2.fc30.x86_64 libzstd-1.4.0-1.fc30.x86_64 numactl-libs-2.0.12-2.fc30.x86_64 pcre-8.43-2.fc30.x86_64 perl-libs-5.28.2-436.fc30.x86_64 popt-1.16-17.fc30.x86_64 python2-libs-2.7.16-2.fc30.x86_64 slang-2.3.2-5.fc30.x86_64 xz-libs-5.2.4-5.fc30.x86_64 zlib-1.2.11-15.fc30.x86_64
(gdb) bt
#0 0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
#1 0x00000000004f6c7a in perf_evlist__propagate_maps (evlist=0xbb34c0) at util/evlist.c:178
#2 0x00000000004f955e in perf_evlist__set_maps (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:1128
#3 0x00000000004f66f8 in evlist__init (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:52
#4 0x00000000004f6790 in evlist__new () at util/evlist.c:64
#5 0x0000000000456071 in cmd_stat (argc=3, argv=0x7fffffffd670) at builtin-stat.c:1705
#6 0x00000000004dd0fa in run_builtin (p=0xa21e00 <commands+288>, argc=3, argv=0x7fffffffd670) at perf.c:304
#7 0x00000000004dd367 in handle_internal_command (argc=3, argv=0x7fffffffd670) at perf.c:356
#8 0x00000000004dd4ae in run_argv (argcp=0x7fffffffd4cc, argv=0x7fffffffd4c0) at perf.c:400
#9 0x00000000004dd81a in main (argc=3, argv=0x7fffffffd670) at perf.c:522
(gdb) bt
So move the initialization of the core evlist (calling
perf_evlist__init()) to before perf_evlist__set_maps() in
evlist__init().
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190721112506.12306-39-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-07-21 19:24:25 +08:00
|
|
|
perf_evlist__init(&evlist->core);
|
2019-07-21 19:24:43 +08:00
|
|
|
perf_evlist__set_maps(&evlist->core, cpus, threads);
|
2011-11-09 18:47:15 +08:00
|
|
|
evlist->workload.pid = -1;
|
2016-07-14 16:34:42 +08:00
|
|
|
evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
|
2020-07-17 15:00:47 +08:00
|
|
|
evlist->ctl_fd.fd = -1;
|
|
|
|
evlist->ctl_fd.ack = -1;
|
|
|
|
evlist->ctl_fd.pos = -1;
|
2011-01-19 07:41:45 +08:00
|
|
|
}
|
|
|
|
|
2019-07-21 19:23:55 +08:00
|
|
|
struct evlist *evlist__new(void)
|
2011-01-12 06:56:53 +08:00
|
|
|
{
|
2019-07-21 19:23:52 +08:00
|
|
|
struct evlist *evlist = zalloc(sizeof(*evlist));
|
2011-01-12 06:56:53 +08:00
|
|
|
|
2011-01-19 07:41:45 +08:00
|
|
|
if (evlist != NULL)
|
2019-07-21 19:23:54 +08:00
|
|
|
evlist__init(evlist, NULL, NULL);
|
2011-01-12 06:56:53 +08:00
|
|
|
|
|
|
|
return evlist;
|
|
|
|
}
|
|
|
|
|
2019-07-21 19:23:52 +08:00
|
|
|
struct evlist *perf_evlist__new_default(void)
|
2013-09-01 18:36:14 +08:00
|
|
|
{
|
2019-07-21 19:23:55 +08:00
|
|
|
struct evlist *evlist = evlist__new();
|
2013-09-01 18:36:14 +08:00
|
|
|
|
2020-06-17 20:16:20 +08:00
|
|
|
if (evlist && evlist__add_default(evlist)) {
|
2019-07-21 19:23:56 +08:00
|
|
|
evlist__delete(evlist);
|
2013-09-01 18:36:14 +08:00
|
|
|
evlist = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return evlist;
|
|
|
|
}
|
|
|
|
|
2019-07-21 19:23:52 +08:00
|
|
|
struct evlist *perf_evlist__new_dummy(void)
|
2016-01-08 00:14:56 +08:00
|
|
|
{
|
2019-07-21 19:23:55 +08:00
|
|
|
struct evlist *evlist = evlist__new();
|
2016-01-08 00:14:56 +08:00
|
|
|
|
2020-06-17 20:16:20 +08:00
|
|
|
if (evlist && evlist__add_dummy(evlist)) {
|
2019-07-21 19:23:56 +08:00
|
|
|
evlist__delete(evlist);
|
2016-01-08 00:14:56 +08:00
|
|
|
evlist = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return evlist;
|
|
|
|
}
|
|
|
|
|
2013-08-27 16:23:09 +08:00
|
|
|
/**
|
2020-12-01 01:44:40 +08:00
|
|
|
* evlist__set_id_pos - set the positions of event ids.
|
2013-08-27 16:23:09 +08:00
|
|
|
* @evlist: selected event list
|
|
|
|
*
|
|
|
|
* Events with compatible sample types all have the same id_pos
|
|
|
|
* and is_pos. For convenience, put a copy on evlist.
|
|
|
|
*/
|
2020-12-01 01:44:40 +08:00
|
|
|
void evlist__set_id_pos(struct evlist *evlist)
|
2013-08-27 16:23:09 +08:00
|
|
|
{
|
2019-09-03 16:39:52 +08:00
|
|
|
struct evsel *first = evlist__first(evlist);
|
2013-08-27 16:23:09 +08:00
|
|
|
|
|
|
|
evlist->id_pos = first->id_pos;
|
|
|
|
evlist->is_pos = first->is_pos;
|
|
|
|
}
|
|
|
|
|
2020-12-01 01:44:40 +08:00
|
|
|
static void evlist__update_id_pos(struct evlist *evlist)
|
2013-09-07 03:40:11 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2013-09-07 03:40:11 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel)
|
2020-04-30 02:58:40 +08:00
|
|
|
evsel__calc_id_pos(evsel);
|
2013-09-07 03:40:11 +08:00
|
|
|
|
2020-12-01 01:44:40 +08:00
|
|
|
evlist__set_id_pos(evlist);
|
2013-09-07 03:40:11 +08:00
|
|
|
}
|
|
|
|
|
2019-09-05 16:11:37 +08:00
|
|
|
static void evlist__purge(struct evlist *evlist)
|
2011-01-12 06:56:53 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *pos, *n;
|
2011-01-12 06:56:53 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry_safe(evlist, n, pos) {
|
2019-07-21 19:24:22 +08:00
|
|
|
list_del_init(&pos->core.node);
|
2015-08-27 20:07:40 +08:00
|
|
|
pos->evlist = NULL;
|
2019-07-21 19:23:57 +08:00
|
|
|
evsel__delete(pos);
|
2011-01-12 06:56:53 +08:00
|
|
|
}
|
|
|
|
|
2019-07-21 19:24:28 +08:00
|
|
|
evlist->core.nr_entries = 0;
|
2011-01-12 06:56:53 +08:00
|
|
|
}
|
|
|
|
|
2019-09-02 20:34:52 +08:00
|
|
|
void evlist__exit(struct evlist *evlist)
|
2011-01-12 06:56:53 +08:00
|
|
|
{
|
2013-12-27 04:41:15 +08:00
|
|
|
zfree(&evlist->mmap);
|
2017-12-05 00:51:07 +08:00
|
|
|
zfree(&evlist->overwrite_mmap);
|
2019-10-07 20:53:32 +08:00
|
|
|
perf_evlist__exit(&evlist->core);
|
2011-01-19 07:41:45 +08:00
|
|
|
}
|
|
|
|
|
2019-07-21 19:23:56 +08:00
|
|
|
void evlist__delete(struct evlist *evlist)
|
2011-01-19 07:41:45 +08:00
|
|
|
{
|
2016-06-22 05:15:45 +08:00
|
|
|
if (evlist == NULL)
|
|
|
|
return;
|
|
|
|
|
2019-08-16 22:19:55 +08:00
|
|
|
evlist__munmap(evlist);
|
2019-07-21 19:24:07 +08:00
|
|
|
evlist__close(evlist);
|
2019-09-05 16:11:37 +08:00
|
|
|
evlist__purge(evlist);
|
2019-09-02 20:34:52 +08:00
|
|
|
evlist__exit(evlist);
|
2011-01-12 06:56:53 +08:00
|
|
|
free(evlist);
|
|
|
|
}
|
|
|
|
|
2019-07-21 19:23:59 +08:00
|
|
|
void evlist__add(struct evlist *evlist, struct evsel *entry)
|
2011-01-12 06:56:53 +08:00
|
|
|
{
|
2015-08-27 20:07:40 +08:00
|
|
|
entry->evlist = evlist;
|
2019-07-21 19:24:28 +08:00
|
|
|
entry->idx = evlist->core.nr_entries;
|
2014-07-31 14:00:52 +08:00
|
|
|
entry->tracking = !entry->idx;
|
2013-11-08 03:41:19 +08:00
|
|
|
|
2019-07-21 19:24:28 +08:00
|
|
|
perf_evlist__add(&evlist->core, &entry->core);
|
|
|
|
|
|
|
|
if (evlist->core.nr_entries == 1)
|
2020-12-01 01:44:40 +08:00
|
|
|
evlist__set_id_pos(evlist);
|
2011-01-12 06:56:53 +08:00
|
|
|
}
|
|
|
|
|
2019-07-21 19:24:00 +08:00
|
|
|
void evlist__remove(struct evlist *evlist, struct evsel *evsel)
|
2015-09-25 21:15:53 +08:00
|
|
|
{
|
|
|
|
evsel->evlist = NULL;
|
2019-07-21 19:24:27 +08:00
|
|
|
perf_evlist__remove(&evlist->core, &evsel->core);
|
2015-09-25 21:15:53 +08:00
|
|
|
}
|
|
|
|
|
2020-12-01 01:52:44 +08:00
|
|
|
void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list)
|
2011-11-04 19:10:59 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel, *temp;
|
2013-08-27 16:23:09 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
__evlist__for_each_entry_safe(list, temp, evsel) {
|
2019-07-21 19:24:22 +08:00
|
|
|
list_del_init(&evsel->core.node);
|
2019-07-21 19:23:59 +08:00
|
|
|
evlist__add(evlist, evsel);
|
2015-09-08 15:58:53 +08:00
|
|
|
}
|
2011-11-04 19:10:59 +08:00
|
|
|
}
|
|
|
|
|
2019-10-01 22:14:26 +08:00
|
|
|
int __evlist__set_tracepoints_handlers(struct evlist *evlist,
|
|
|
|
const struct evsel_str_handler *assocs, size_t nr_assocs)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
for (i = 0; i < nr_assocs; i++) {
|
|
|
|
// Adding a handler for an event not in this evlist, just ignore it.
|
2020-11-30 20:48:07 +08:00
|
|
|
struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name);
|
2019-10-01 22:14:26 +08:00
|
|
|
if (evsel == NULL)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
err = -EEXIST;
|
|
|
|
if (evsel->handler != NULL)
|
|
|
|
goto out;
|
|
|
|
evsel->handler = assocs[i].handler;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = 0;
|
|
|
|
out:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-11-30 20:22:07 +08:00
|
|
|
void __evlist__set_leader(struct list_head *list)
|
2012-08-15 03:35:48 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel, *leader;
|
2012-08-15 03:35:48 +08:00
|
|
|
|
2019-07-21 19:24:22 +08:00
|
|
|
leader = list_entry(list->next, struct evsel, core.node);
|
|
|
|
evsel = list_entry(list->prev, struct evsel, core.node);
|
2013-01-22 17:09:29 +08:00
|
|
|
|
2019-07-21 19:24:46 +08:00
|
|
|
leader->core.nr_members = evsel->idx - leader->idx + 1;
|
2012-08-15 03:35:48 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
__evlist__for_each_entry(list, evsel) {
|
2013-01-31 20:54:37 +08:00
|
|
|
evsel->leader = leader;
|
2012-08-15 03:35:48 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-30 20:22:07 +08:00
|
|
|
void evlist__set_leader(struct evlist *evlist)
|
perf tools: Enable grouping logic for parsed events
This patch adds a functionality that allows to create event groups
based on the way they are specified on the command line. Adding
functionality to the '{}' group syntax introduced in earlier patch.
The current '--group/-g' option behaviour remains intact. If you
specify it for record/stat/top command, all the specified events
become members of a single group with the first event as a group
leader.
With the new '{}' group syntax you can create group like:
# perf record -e '{cycles,faults}' ls
resulting in single event group containing 'cycles' and 'faults'
events, with cycles event as group leader.
All groups are created with regards to threads and cpus. Thus
recording an event group within a 2 threads on server with
4 CPUs will create 8 separate groups.
Examples (first event in brackets is group leader):
# 1 group (cpu-clock,task-clock)
perf record --group -e cpu-clock,task-clock ls
perf record -e '{cpu-clock,task-clock}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock},{minor-faults,major-faults}' ls
# 1 group (cpu-clock,task-clock,minor-faults,major-faults)
perf record --group -e cpu-clock,task-clock -e minor-faults,major-faults ls
perf record -e '{cpu-clock,task-clock,minor-faults,major-faults}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock} -e '{minor-faults,major-faults}' \
-e instructions ls
# 1 group
# (cpu-clock,task-clock,minor-faults,major-faults,instructions)
perf record --group -e cpu-clock,task-clock \
-e minor-faults,major-faults -e instructions ls perf record -e
'{cpu-clock,task-clock,minor-faults,major-faults,instructions}' ls
It's possible to use standard event modifier for a group, which spans
over all events in the group and updates each event modifier settings,
for example:
# perf record -r '{faults:k,cache-references}:p'
resulting in ':kp' modifier being used for 'faults' and ':p' modifier
being used for 'cache-references' event.
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ulrich Drepper <drepper@gmail.com>
Link: http://lkml.kernel.org/n/tip-ho42u0wcr8mn1otkalqi13qp@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2012-08-08 18:22:36 +08:00
|
|
|
{
|
2019-07-21 19:24:28 +08:00
|
|
|
if (evlist->core.nr_entries) {
|
|
|
|
evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
|
2020-11-30 20:22:07 +08:00
|
|
|
__evlist__set_leader(&evlist->core.entries);
|
2013-01-22 17:09:29 +08:00
|
|
|
}
|
perf tools: Enable grouping logic for parsed events
This patch adds a functionality that allows to create event groups
based on the way they are specified on the command line. Adding
functionality to the '{}' group syntax introduced in earlier patch.
The current '--group/-g' option behaviour remains intact. If you
specify it for record/stat/top command, all the specified events
become members of a single group with the first event as a group
leader.
With the new '{}' group syntax you can create group like:
# perf record -e '{cycles,faults}' ls
resulting in single event group containing 'cycles' and 'faults'
events, with cycles event as group leader.
All groups are created with regards to threads and cpus. Thus
recording an event group within a 2 threads on server with
4 CPUs will create 8 separate groups.
Examples (first event in brackets is group leader):
# 1 group (cpu-clock,task-clock)
perf record --group -e cpu-clock,task-clock ls
perf record -e '{cpu-clock,task-clock}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock},{minor-faults,major-faults}' ls
# 1 group (cpu-clock,task-clock,minor-faults,major-faults)
perf record --group -e cpu-clock,task-clock -e minor-faults,major-faults ls
perf record -e '{cpu-clock,task-clock,minor-faults,major-faults}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock} -e '{minor-faults,major-faults}' \
-e instructions ls
# 1 group
# (cpu-clock,task-clock,minor-faults,major-faults,instructions)
perf record --group -e cpu-clock,task-clock \
-e minor-faults,major-faults -e instructions ls perf record -e
'{cpu-clock,task-clock,minor-faults,major-faults,instructions}' ls
It's possible to use standard event modifier for a group, which spans
over all events in the group and updates each event modifier settings,
for example:
# perf record -r '{faults:k,cache-references}:p'
resulting in ':kp' modifier being used for 'faults' and ':p' modifier
being used for 'cache-references' event.
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ulrich Drepper <drepper@gmail.com>
Link: http://lkml.kernel.org/n/tip-ho42u0wcr8mn1otkalqi13qp@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2012-08-08 18:22:36 +08:00
|
|
|
}
|
|
|
|
|
2020-06-17 20:16:20 +08:00
|
|
|
/*
 * Add the default event (cycles, per evsel__new_cycles()) to @evlist.
 * @precise: request a precise-ip variant of the event.
 *
 * Return: 0 on success, -ENOMEM if the evsel could not be allocated.
 */
int __evlist__add_default(struct evlist *evlist, bool precise)
{
	struct evsel *evsel = evsel__new_cycles(precise);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}
|
2011-01-12 08:30:02 +08:00
|
|
|
|
2020-06-17 20:16:20 +08:00
|
|
|
int evlist__add_dummy(struct evlist *evlist)
|
2016-01-08 00:14:56 +08:00
|
|
|
{
|
|
|
|
struct perf_event_attr attr = {
|
|
|
|
.type = PERF_TYPE_SOFTWARE,
|
|
|
|
.config = PERF_COUNT_SW_DUMMY,
|
|
|
|
.size = sizeof(attr), /* to capture ABI version */
|
|
|
|
};
|
2020-05-07 00:27:04 +08:00
|
|
|
struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);
|
2016-01-08 00:14:56 +08:00
|
|
|
|
|
|
|
if (evsel == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2019-07-21 19:23:59 +08:00
|
|
|
evlist__add(evlist, evsel);
|
2016-01-08 00:14:56 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-06-17 20:16:20 +08:00
|
|
|
static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
|
2011-11-04 19:10:59 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel, *n;
|
2011-11-04 19:10:59 +08:00
|
|
|
LIST_HEAD(head);
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < nr_attrs; i++) {
|
2020-05-07 00:27:04 +08:00
|
|
|
evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
|
2011-11-04 19:10:59 +08:00
|
|
|
if (evsel == NULL)
|
|
|
|
goto out_delete_partial_list;
|
2019-07-21 19:24:22 +08:00
|
|
|
list_add_tail(&evsel->core.node, &head);
|
2011-11-04 19:10:59 +08:00
|
|
|
}
|
|
|
|
|
2020-12-01 01:52:44 +08:00
|
|
|
evlist__splice_list_tail(evlist, &head);
|
2011-11-04 19:10:59 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_delete_partial_list:
|
2016-06-23 22:26:15 +08:00
|
|
|
__evlist__for_each_entry_safe(&head, n, evsel)
|
2019-07-21 19:23:57 +08:00
|
|
|
evsel__delete(evsel);
|
2011-11-04 19:10:59 +08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-06-17 20:16:20 +08:00
|
|
|
int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
|
perf stat: Initialize default events wrt exclude_{guest,host}
When no event is specified the tools use perf_evlist__add_default(), that will
call event_attr_init to initialize the KVM exclusion bits.
When the change was made to the tools so that by default guest samples would be
excluded, the changes were made just to the parsing routines and to
perf_evlist__add_default(), not to perf_evlist__add_attrs, that is used so far
just by perf stat to add multiple events, according to the level of detail
specified.
Recently the tools were changed to reconstruct the event name from all the
details in perf_event_attr, not just from .type and .config, but taking into
account all the feature bits (.exclude_{guest,host,user,kernel,etc},
.precise_ip, etc).
That is when we noticed that the default for perf stat wasn't the one for the
rest of the tools, i.e. the .exclude_guest bit wasn't being set.
I.e. the default, that doesn't call event_attr_init was showing the :HG
modifier:
$ perf stat usleep 1
Performance counter stats for 'usleep 1':
0.942119 task-clock # 0.454 CPUs utilized
1 context-switches # 0.001 M/sec
0 CPU-migrations # 0.000 K/sec
126 page-faults # 0.134 M/sec
693,193 cycles:HG # 0.736 GHz [40.11%]
407,461 stalled-cycles-frontend:HG # 58.78% frontend cycles idle [72.29%]
365,403 stalled-cycles-backend:HG # 52.71% backend cycles idle
465,982 instructions:HG # 0.67 insns per cycle
# 0.87 stalled cycles per insn
89,760 branches:HG # 95.275 M/sec
6,178 branch-misses:HG # 6.88% of all branches
0.002077228 seconds time elapsed
While if one explicitely specifies the same events, which will make the parsing code
to be called and thus event_attr_init is called:
$ perf stat -e task-clock,context-switches,migrations,page-faults,cycles,stalled-cycles-frontend,stalled-cycles-backend,instructions,branches,branch-misses usleep 1
Performance counter stats for 'usleep 1':
1.040349 task-clock # 0.500 CPUs utilized
2 context-switches # 0.002 M/sec
0 CPU-migrations # 0.000 K/sec
127 page-faults # 0.122 M/sec
587,966 cycles # 0.565 GHz [13.18%]
459,167 stalled-cycles-frontend # 78.09% frontend cycles idle
390,249 stalled-cycles-backend # 66.37% backend cycles idle
504,006 instructions # 0.86 insns per cycle
# 0.91 stalled cycles per insn
96,455 branches # 92.714 M/sec
6,522 branch-misses # 6.76% of all branches [96.12%]
0.002078681 seconds time elapsed
Fix it by introducing a perf_evlist__add_default_attrs method that will call
evlist_attr_init in all the perf_event_attr entries before adding the events.
Reported-by: Ingo Molnar <mingo@kernel.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-4eysr236r0pgiyum9epwxw7s@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2012-05-31 00:53:54 +08:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < nr_attrs; i++)
|
|
|
|
event_attr_init(attrs + i);
|
|
|
|
|
2019-07-21 19:23:59 +08:00
|
|
|
return evlist__add_attrs(evlist, attrs, nr_attrs);
|
perf stat: Initialize default events wrt exclude_{guest,host}
When no event is specified the tools use perf_evlist__add_default(), that will
call event_attr_init to initialize the KVM exclusion bits.
When the change was made to the tools so that by default guest samples would be
excluded, the changes were made just to the parsing routines and to
perf_evlist__add_default(), not to perf_evlist__add_attrs, that is used so far
just by perf stat to add multiple events, according to the level of detail
specified.
Recently the tools were changed to reconstruct the event name from all the
details in perf_event_attr, not just from .type and .config, but taking into
account all the feature bits (.exclude_{guest,host,user,kernel,etc},
.precise_ip, etc).
That is when we noticed that the default for perf stat wasn't the one for the
rest of the tools, i.e. the .exclude_guest bit wasn't being set.
I.e. the default, that doesn't call event_attr_init was showing the :HG
modifier:
$ perf stat usleep 1
Performance counter stats for 'usleep 1':
0.942119 task-clock # 0.454 CPUs utilized
1 context-switches # 0.001 M/sec
0 CPU-migrations # 0.000 K/sec
126 page-faults # 0.134 M/sec
693,193 cycles:HG # 0.736 GHz [40.11%]
407,461 stalled-cycles-frontend:HG # 58.78% frontend cycles idle [72.29%]
365,403 stalled-cycles-backend:HG # 52.71% backend cycles idle
465,982 instructions:HG # 0.67 insns per cycle
# 0.87 stalled cycles per insn
89,760 branches:HG # 95.275 M/sec
6,178 branch-misses:HG # 6.88% of all branches
0.002077228 seconds time elapsed
While if one explicitely specifies the same events, which will make the parsing code
to be called and thus event_attr_init is called:
$ perf stat -e task-clock,context-switches,migrations,page-faults,cycles,stalled-cycles-frontend,stalled-cycles-backend,instructions,branches,branch-misses usleep 1
Performance counter stats for 'usleep 1':
1.040349 task-clock # 0.500 CPUs utilized
2 context-switches # 0.002 M/sec
0 CPU-migrations # 0.000 K/sec
127 page-faults # 0.122 M/sec
587,966 cycles # 0.565 GHz [13.18%]
459,167 stalled-cycles-frontend # 78.09% frontend cycles idle
390,249 stalled-cycles-backend # 66.37% backend cycles idle
504,006 instructions # 0.86 insns per cycle
# 0.91 stalled cycles per insn
96,455 branches # 92.714 M/sec
6,522 branch-misses # 6.76% of all branches [96.12%]
0.002078681 seconds time elapsed
Fix it by introducing a perf_evlist__add_default_attrs method that will call
evlist_attr_init in all the perf_event_attr entries before adding the events.
Reported-by: Ingo Molnar <mingo@kernel.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-4eysr236r0pgiyum9epwxw7s@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2012-05-31 00:53:54 +08:00
|
|
|
}
|
|
|
|
|
2020-11-30 20:48:07 +08:00
|
|
|
struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
|
perf tools: Save some loops using perf_evlist__id2evsel
Since we already ask for PERF_SAMPLE_ID and use it to quickly find the
associated evsel, add handler func + data to struct perf_evsel to avoid
using chains of if(strcmp(event_name)) and also to avoid all the linear
list searches via trace_event_find.
To demonstrate the technique convert 'perf sched' to it:
# perf sched record sleep 5m
And then:
Performance counter stats for '/tmp/oldperf sched lat':
646.929438 task-clock # 0.999 CPUs utilized
9 context-switches # 0.000 M/sec
0 CPU-migrations # 0.000 M/sec
20,901 page-faults # 0.032 M/sec
1,290,144,450 cycles # 1.994 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1,606,158,439 instructions # 1.24 insns per cycle
339,088,395 branches # 524.151 M/sec
4,550,735 branch-misses # 1.34% of all branches
0.647524759 seconds time elapsed
Versus:
Performance counter stats for 'perf sched lat':
473.564691 task-clock # 0.999 CPUs utilized
9 context-switches # 0.000 M/sec
0 CPU-migrations # 0.000 M/sec
20,903 page-faults # 0.044 M/sec
944,367,984 cycles # 1.994 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1,442,385,571 instructions # 1.53 insns per cycle
308,383,106 branches # 651.195 M/sec
4,481,784 branch-misses # 1.45% of all branches
0.474215751 seconds time elapsed
[root@emilia ~]#
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-1kbzpl74lwi6lavpqke2u2p3@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2011-11-29 03:57:40 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
perf tools: Save some loops using perf_evlist__id2evsel
Since we already ask for PERF_SAMPLE_ID and use it to quickly find the
associated evsel, add handler func + data to struct perf_evsel to avoid
using chains of if(strcmp(event_name)) and also to avoid all the linear
list searches via trace_event_find.
To demonstrate the technique convert 'perf sched' to it:
# perf sched record sleep 5m
And then:
Performance counter stats for '/tmp/oldperf sched lat':
646.929438 task-clock # 0.999 CPUs utilized
9 context-switches # 0.000 M/sec
0 CPU-migrations # 0.000 M/sec
20,901 page-faults # 0.032 M/sec
1,290,144,450 cycles # 1.994 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1,606,158,439 instructions # 1.24 insns per cycle
339,088,395 branches # 524.151 M/sec
4,550,735 branch-misses # 1.34% of all branches
0.647524759 seconds time elapsed
Versus:
Performance counter stats for 'perf sched lat':
473.564691 task-clock # 0.999 CPUs utilized
9 context-switches # 0.000 M/sec
0 CPU-migrations # 0.000 M/sec
20,903 page-faults # 0.044 M/sec
944,367,984 cycles # 1.994 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1,442,385,571 instructions # 1.53 insns per cycle
308,383,106 branches # 651.195 M/sec
4,481,784 branch-misses # 1.45% of all branches
0.474215751 seconds time elapsed
[root@emilia ~]#
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-1kbzpl74lwi6lavpqke2u2p3@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2011-11-29 03:57:40 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2019-07-21 19:24:29 +08:00
|
|
|
if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
|
|
|
|
(int)evsel->core.attr.config == id)
|
perf tools: Save some loops using perf_evlist__id2evsel
Since we already ask for PERF_SAMPLE_ID and use it to quickly find the
associated evsel, add handler func + data to struct perf_evsel to avoid
using chains of if(strcmp(event_name)) and also to avoid all the linear
list searches via trace_event_find.
To demonstrate the technique convert 'perf sched' to it:
# perf sched record sleep 5m
And then:
Performance counter stats for '/tmp/oldperf sched lat':
646.929438 task-clock # 0.999 CPUs utilized
9 context-switches # 0.000 M/sec
0 CPU-migrations # 0.000 M/sec
20,901 page-faults # 0.032 M/sec
1,290,144,450 cycles # 1.994 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1,606,158,439 instructions # 1.24 insns per cycle
339,088,395 branches # 524.151 M/sec
4,550,735 branch-misses # 1.34% of all branches
0.647524759 seconds time elapsed
Versus:
Performance counter stats for 'perf sched lat':
473.564691 task-clock # 0.999 CPUs utilized
9 context-switches # 0.000 M/sec
0 CPU-migrations # 0.000 M/sec
20,903 page-faults # 0.044 M/sec
944,367,984 cycles # 1.994 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1,442,385,571 instructions # 1.53 insns per cycle
308,383,106 branches # 651.195 M/sec
4,481,784 branch-misses # 1.45% of all branches
0.474215751 seconds time elapsed
[root@emilia ~]#
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-1kbzpl74lwi6lavpqke2u2p3@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2011-11-29 03:57:40 +08:00
|
|
|
return evsel;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-11-30 20:48:07 +08:00
|
|
|
struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
|
2013-08-29 12:29:51 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2013-08-29 12:29:51 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2019-07-21 19:24:29 +08:00
|
|
|
if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
|
2013-08-29 12:29:51 +08:00
|
|
|
(strcmp(evsel->name, name) == 0))
|
|
|
|
return evsel;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-06-17 20:16:20 +08:00
|
|
|
int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
|
2012-10-03 22:40:22 +08:00
|
|
|
{
|
2020-05-07 00:27:04 +08:00
|
|
|
struct evsel *evsel = evsel__newtp(sys, name);
|
2012-10-03 22:40:22 +08:00
|
|
|
|
2015-09-07 16:38:06 +08:00
|
|
|
if (IS_ERR(evsel))
|
2012-10-03 22:40:22 +08:00
|
|
|
return -1;
|
|
|
|
|
2013-11-06 21:17:38 +08:00
|
|
|
evsel->handler = handler;
|
2019-07-21 19:23:59 +08:00
|
|
|
evlist__add(evlist, evsel);
|
2012-10-03 22:40:22 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-07-21 19:23:52 +08:00
|
|
|
static int perf_evlist__nr_threads(struct evlist *evlist,
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel)
|
2014-07-31 14:00:51 +08:00
|
|
|
{
|
2019-08-06 17:35:19 +08:00
|
|
|
if (evsel->core.system_wide)
|
2014-07-31 14:00:51 +08:00
|
|
|
return 1;
|
|
|
|
else
|
2019-08-22 19:11:41 +08:00
|
|
|
return perf_thread_map__nr(evlist->core.threads);
|
2014-07-31 14:00:51 +08:00
|
|
|
}
|
|
|
|
|
2019-11-21 08:15:15 +08:00
|
|
|
void evlist__cpu_iter_start(struct evlist *evlist)
|
|
|
|
{
|
|
|
|
struct evsel *pos;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reset the per evsel cpu_iter. This is needed because
|
|
|
|
* each evsel's cpumap may have a different index space,
|
|
|
|
* and some operations need the index to modify
|
|
|
|
* the FD xyarray (e.g. open, close)
|
|
|
|
*/
|
|
|
|
evlist__for_each_entry(evlist, pos)
|
|
|
|
pos->cpu_iter = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
|
|
|
|
{
|
|
|
|
if (ev->cpu_iter >= ev->core.cpus->nr)
|
|
|
|
return true;
|
|
|
|
if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
|
|
|
|
{
|
|
|
|
if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
|
|
|
|
ev->cpu_iter++;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-07-21 19:24:09 +08:00
|
|
|
void evlist__disable(struct evlist *evlist)
|
2011-07-25 22:06:19 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *pos;
|
2019-11-21 08:15:22 +08:00
|
|
|
struct affinity affinity;
|
2020-05-12 20:19:17 +08:00
|
|
|
int cpu, i, imm = 0;
|
|
|
|
bool has_imm = false;
|
2015-12-03 17:06:42 +08:00
|
|
|
|
2019-11-21 08:15:22 +08:00
|
|
|
if (affinity__setup(&affinity) < 0)
|
|
|
|
return;
|
|
|
|
|
2020-05-12 20:19:17 +08:00
|
|
|
/* Disable 'immediate' events last */
|
|
|
|
for (imm = 0; imm <= 1; imm++) {
|
|
|
|
evlist__for_each_cpu(evlist, i, cpu) {
|
|
|
|
affinity__set(&affinity, cpu);
|
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, pos) {
|
|
|
|
if (evsel__cpu_iter_skip(pos, cpu))
|
|
|
|
continue;
|
|
|
|
if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
|
|
|
|
continue;
|
|
|
|
if (pos->immediate)
|
|
|
|
has_imm = true;
|
|
|
|
if (pos->immediate != imm)
|
|
|
|
continue;
|
|
|
|
evsel__disable_cpu(pos, pos->cpu_iter - 1);
|
|
|
|
}
|
2019-11-21 08:15:22 +08:00
|
|
|
}
|
2020-05-12 20:19:17 +08:00
|
|
|
if (!has_imm)
|
|
|
|
break;
|
2019-11-21 08:15:22 +08:00
|
|
|
}
|
2020-05-12 20:19:17 +08:00
|
|
|
|
2019-11-21 08:15:22 +08:00
|
|
|
affinity__cleanup(&affinity);
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, pos) {
|
2020-04-30 21:51:16 +08:00
|
|
|
if (!evsel__is_group_leader(pos) || !pos->core.fd)
|
2015-12-03 17:06:42 +08:00
|
|
|
continue;
|
2019-11-21 08:15:22 +08:00
|
|
|
pos->disabled = true;
|
2011-07-25 22:06:19 +08:00
|
|
|
}
|
2015-06-18 03:40:26 +08:00
|
|
|
|
|
|
|
evlist->enabled = false;
|
2011-07-25 22:06:19 +08:00
|
|
|
}
|
|
|
|
|
2019-07-21 19:24:08 +08:00
|
|
|
void evlist__enable(struct evlist *evlist)
|
2011-08-26 00:17:55 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *pos;
|
2019-11-21 08:15:22 +08:00
|
|
|
struct affinity affinity;
|
|
|
|
int cpu, i;
|
2015-12-03 17:06:42 +08:00
|
|
|
|
2019-11-21 08:15:22 +08:00
|
|
|
if (affinity__setup(&affinity) < 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
evlist__for_each_cpu(evlist, i, cpu) {
|
|
|
|
affinity__set(&affinity, cpu);
|
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, pos) {
|
|
|
|
if (evsel__cpu_iter_skip(pos, cpu))
|
|
|
|
continue;
|
2020-04-30 21:51:16 +08:00
|
|
|
if (!evsel__is_group_leader(pos) || !pos->core.fd)
|
2019-11-21 08:15:22 +08:00
|
|
|
continue;
|
|
|
|
evsel__enable_cpu(pos, pos->cpu_iter - 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
affinity__cleanup(&affinity);
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, pos) {
|
2020-04-30 21:51:16 +08:00
|
|
|
if (!evsel__is_group_leader(pos) || !pos->core.fd)
|
2015-12-03 17:06:42 +08:00
|
|
|
continue;
|
2019-11-21 08:15:22 +08:00
|
|
|
pos->disabled = false;
|
2011-08-26 00:17:55 +08:00
|
|
|
}
|
2015-06-18 03:40:26 +08:00
|
|
|
|
|
|
|
evlist->enabled = true;
|
|
|
|
}
|
|
|
|
|
2020-11-30 20:33:55 +08:00
|
|
|
void evlist__toggle_enable(struct evlist *evlist)
|
2015-06-18 03:40:26 +08:00
|
|
|
{
|
2019-07-21 19:24:09 +08:00
|
|
|
(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
|
2011-08-26 00:17:55 +08:00
|
|
|
}
|
|
|
|
|
2020-12-01 01:47:05 +08:00
|
|
|
static int evlist__enable_event_cpu(struct evlist *evlist, struct evsel *evsel, int cpu)
|
2014-07-31 14:00:56 +08:00
|
|
|
{
|
2016-10-03 22:07:24 +08:00
|
|
|
int thread;
|
2014-07-31 14:00:56 +08:00
|
|
|
int nr_threads = perf_evlist__nr_threads(evlist, evsel);
|
|
|
|
|
2019-07-21 19:24:45 +08:00
|
|
|
if (!evsel->core.fd)
|
2014-07-31 14:00:56 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
for (thread = 0; thread < nr_threads; thread++) {
|
2016-10-03 22:07:24 +08:00
|
|
|
int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
|
2014-07-31 14:00:56 +08:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-12-01 01:47:05 +08:00
|
|
|
static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread)
|
2014-07-31 14:00:56 +08:00
|
|
|
{
|
2016-10-03 22:07:24 +08:00
|
|
|
int cpu;
|
2019-08-22 19:11:38 +08:00
|
|
|
int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
|
2014-07-31 14:00:56 +08:00
|
|
|
|
2019-07-21 19:24:45 +08:00
|
|
|
if (!evsel->core.fd)
|
2014-07-31 14:00:56 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
for (cpu = 0; cpu < nr_cpus; cpu++) {
|
2016-10-03 22:07:24 +08:00
|
|
|
int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
|
2014-07-31 14:00:56 +08:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-12-01 01:47:05 +08:00
|
|
|
int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
|
2014-07-31 14:00:56 +08:00
|
|
|
{
|
2019-08-22 19:11:39 +08:00
|
|
|
bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);
|
2014-07-31 14:00:56 +08:00
|
|
|
|
|
|
|
if (per_cpu_mmaps)
|
2020-12-01 01:47:05 +08:00
|
|
|
return evlist__enable_event_cpu(evlist, evsel, idx);
|
|
|
|
|
|
|
|
return evlist__enable_event_thread(evlist, evsel, idx);
|
2014-07-31 14:00:56 +08:00
|
|
|
}
|
|
|
|
|
2019-08-16 22:00:45 +08:00
|
|
|
int evlist__add_pollfd(struct evlist *evlist, int fd)
|
2011-01-13 03:03:24 +08:00
|
|
|
{
|
2020-07-17 14:59:45 +08:00
|
|
|
return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
|
2014-09-08 22:27:49 +08:00
|
|
|
}
|
|
|
|
|
2019-08-16 22:00:45 +08:00
|
|
|
int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
|
2014-08-13 10:04:11 +08:00
|
|
|
{
|
2019-10-07 20:53:34 +08:00
|
|
|
return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
|
2014-08-13 10:04:11 +08:00
|
|
|
}
|
|
|
|
|
2019-09-01 04:48:33 +08:00
|
|
|
int evlist__poll(struct evlist *evlist, int timeout)
|
2014-08-19 04:25:59 +08:00
|
|
|
{
|
2019-09-01 04:48:33 +08:00
|
|
|
return perf_evlist__poll(&evlist->core, timeout);
|
2014-08-19 04:25:59 +08:00
|
|
|
}
|
|
|
|
|
2020-12-01 01:17:57 +08:00
|
|
|
/*
 * Look up the perf_sample_id for a sample @id in the evlist's hash of
 * id -> evsel mappings.  Returns NULL when the id is unknown.
 */
struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}
|
|
|
|
|
2020-12-01 01:17:57 +08:00
|
|
|
/*
 * Map a sample @id back to its evsel.  With a single event (or id == 0)
 * the first evsel is the only candidate; likewise when sample_id_all is off
 * there is no id to match, so fall back to the first evsel.
 */
struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}
|
2011-01-15 20:40:59 +08:00
|
|
|
|
2020-12-01 01:17:57 +08:00
|
|
|
/*
 * Like evlist__id2evsel() but with no first-evsel fallback: only an exact
 * id match returns an evsel, anything else returns NULL.
 */
struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}
|
|
|
|
|
2020-12-01 01:17:57 +08:00
|
|
|
/*
 * Extract the sample id from a raw event record into *@id.  For
 * PERF_RECORD_SAMPLE the id sits at id_pos from the start of the payload;
 * for other record types it sits at is_pos from the end.  Returns -1 when
 * the record is too short to contain an id, 0 otherwise.
 */
static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	/* payload length in u64 words */
	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
|
|
|
|
|
2020-12-01 01:17:57 +08:00
|
|
|
struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event)
|
2013-08-27 16:23:09 +08:00
|
|
|
{
|
2019-09-03 16:39:52 +08:00
|
|
|
struct evsel *first = evlist__first(evlist);
|
2013-08-27 16:23:09 +08:00
|
|
|
struct hlist_head *head;
|
|
|
|
struct perf_sample_id *sid;
|
|
|
|
int hash;
|
|
|
|
u64 id;
|
|
|
|
|
2019-07-21 19:24:28 +08:00
|
|
|
if (evlist->core.nr_entries == 1)
|
2013-09-05 04:18:17 +08:00
|
|
|
return first;
|
|
|
|
|
2019-07-21 19:24:29 +08:00
|
|
|
if (!first->core.attr.sample_id_all &&
|
2013-09-05 04:18:17 +08:00
|
|
|
event->header.type != PERF_RECORD_SAMPLE)
|
|
|
|
return first;
|
2013-08-27 16:23:09 +08:00
|
|
|
|
2020-12-01 01:17:57 +08:00
|
|
|
if (evlist__event2id(evlist, event, &id))
|
2013-08-27 16:23:09 +08:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* Synthesized events have an id of zero */
|
|
|
|
if (!id)
|
2013-09-05 04:18:17 +08:00
|
|
|
return first;
|
2013-08-27 16:23:09 +08:00
|
|
|
|
|
|
|
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
|
2019-09-03 04:20:12 +08:00
|
|
|
head = &evlist->core.heads[hash];
|
2013-08-27 16:23:09 +08:00
|
|
|
|
|
|
|
hlist_for_each_entry(sid, head, node) {
|
|
|
|
if (sid->id == id)
|
2019-09-03 16:34:29 +08:00
|
|
|
return container_of(sid->evsel, struct evsel, core);
|
2013-08-27 16:23:09 +08:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-12-01 01:49:05 +08:00
|
|
|
static int evlist__set_paused(struct evlist *evlist, bool value)
|
2016-05-23 15:13:38 +08:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2017-12-05 00:51:07 +08:00
|
|
|
if (!evlist->overwrite_mmap)
|
2016-07-14 16:34:40 +08:00
|
|
|
return 0;
|
|
|
|
|
2019-07-30 19:04:59 +08:00
|
|
|
for (i = 0; i < evlist->core.nr_mmaps; i++) {
|
2019-07-28 04:31:17 +08:00
|
|
|
int fd = evlist->overwrite_mmap[i].core.fd;
|
2016-05-23 15:13:38 +08:00
|
|
|
int err;
|
|
|
|
|
|
|
|
if (fd < 0)
|
|
|
|
continue;
|
|
|
|
err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-12-01 01:49:05 +08:00
|
|
|
/* Pause output on all overwrite ring buffers. */
static int evlist__pause(struct evlist *evlist)
{
	return evlist__set_paused(evlist, true);
}
|
|
|
|
|
2020-12-01 01:49:05 +08:00
|
|
|
/* Resume output on all overwrite ring buffers. */
static int evlist__resume(struct evlist *evlist)
{
	return evlist__set_paused(evlist, false);
}
|
|
|
|
|
2019-08-16 22:19:55 +08:00
|
|
|
static void evlist__munmap_nofree(struct evlist *evlist)
|
2011-01-30 20:46:46 +08:00
|
|
|
{
|
perf evlist: Fix per thread mmap setup
The PERF_EVENT_IOC_SET_OUTPUT ioctl was returning -EINVAL when using
--pid when monitoring multithreaded apps, as we can only share a ring
buffer for events on the same thread if not doing per cpu.
Fix it by using per thread ring buffers.
Tested with:
[root@felicio ~]# tuna -t 26131 -CP | nl
1 thread ctxt_switches
2 pid SCHED_ rtpri affinity voluntary nonvoluntary cmd
3 26131 OTHER 0 0,1 10814276 2397830 chromium-browse
4 642 OTHER 0 0,1 14688 0 chromium-browse
5 26148 OTHER 0 0,1 713602 115479 chromium-browse
6 26149 OTHER 0 0,1 801958 2262 chromium-browse
7 26150 OTHER 0 0,1 1271128 248 chromium-browse
8 26151 OTHER 0 0,1 3 0 chromium-browse
9 27049 OTHER 0 0,1 36796 9 chromium-browse
10 618 OTHER 0 0,1 14711 0 chromium-browse
11 661 OTHER 0 0,1 14593 0 chromium-browse
12 29048 OTHER 0 0,1 28125 0 chromium-browse
13 26143 OTHER 0 0,1 2202789 781 chromium-browse
[root@felicio ~]#
So 11 threads under pid 26131, then:
[root@felicio ~]# perf record -F 50000 --pid 26131
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7fa4a2538000-7fa4a25b9000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
2 7fa4a25b9000-7fa4a263a000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
3 7fa4a263a000-7fa4a26bb000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
4 7fa4a26bb000-7fa4a273c000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
5 7fa4a273c000-7fa4a27bd000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
6 7fa4a27bd000-7fa4a283e000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
7 7fa4a283e000-7fa4a28bf000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
8 7fa4a28bf000-7fa4a2940000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
9 7fa4a2940000-7fa4a29c1000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
10 7fa4a29c1000-7fa4a2a42000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
11 7fa4a2a42000-7fa4a2ac3000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
11 mmaps, one per thread since we didn't specify any CPU list, so we need one
mmap per thread and:
[root@felicio ~]# perf record -F 50000 --pid 26131
^M
^C[ perf record: Woken up 79 times to write data ]
[ perf record: Captured and wrote 20.614 MB perf.data (~900639 samples) ]
[root@felicio ~]# perf report -D | grep PERF_RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort -n | uniq -c | sort -nr | nl
1 371310 26131
2 96516 26148
3 95694 26149
4 95203 26150
5 7291 26143
6 87 27049
7 76 661
8 60 29048
9 47 618
10 43 642
[root@felicio ~]#
Ok, one of the threads, 26151 was quiescent, so no samples there, but all the
others are there.
Then, if I specify one CPU:
[root@felicio ~]# perf record -F 50000 --pid 26131 --cpu 1
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.680 MB perf.data (~29730 samples) ]
[root@felicio ~]# perf report -D | grep PERF_RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort -n | uniq -c | sort -nr | nl
1 8444 26131
2 2584 26149
3 2518 26148
4 2324 26150
5 123 26143
6 9 661
7 9 29048
[root@felicio ~]#
This machine has two cores, so fewer threads appeared on the radar, and:
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7f484b922000-7f484b9a3000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
Just one mmap, as now we can use just one per-cpu buffer instead of the
per-thread needed in the previous case.
For global profiling:
[root@felicio ~]# perf record -F 50000 -a
^C[ perf record: Woken up 26 times to write data ]
[ perf record: Captured and wrote 7.128 MB perf.data (~311412 samples) ]
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7fb49b435000-7fb49b4b6000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
2 7fb49b4b6000-7fb49b537000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
It uses per-cpu buffers.
For just one thread:
[root@felicio ~]# perf record -F 50000 --tid 26148
^C[ perf record: Woken up 2 times to write data ]
[ perf record: Captured and wrote 0.330 MB perf.data (~14426 samples) ]
[root@felicio ~]# perf report -D | grep PERF_RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort -n | uniq -c | sort -nr | nl
1 9969 26148
[root@felicio ~]#
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7f286a51b000-7f286a59c000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
Tested-by: David Ahern <dsahern@gmail.com>
Tested-by: Lin Ming <ming.m.lin@intel.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Link: http://lkml.kernel.org/r/20110426204401.GB1746@ghostprotocols.net
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2011-05-15 20:39:00 +08:00
|
|
|
int i;
|
2011-01-30 20:46:46 +08:00
|
|
|
|
2016-07-14 16:34:39 +08:00
|
|
|
if (evlist->mmap)
|
2019-07-30 19:04:59 +08:00
|
|
|
for (i = 0; i < evlist->core.nr_mmaps; i++)
|
2019-10-07 20:53:15 +08:00
|
|
|
perf_mmap__munmap(&evlist->mmap[i].core);
|
2014-01-04 04:25:49 +08:00
|
|
|
|
2017-12-05 00:51:07 +08:00
|
|
|
if (evlist->overwrite_mmap)
|
2019-07-30 19:04:59 +08:00
|
|
|
for (i = 0; i < evlist->core.nr_mmaps; i++)
|
2019-10-07 20:53:15 +08:00
|
|
|
perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
|
2016-07-14 16:34:38 +08:00
|
|
|
}
|
perf evlist: Fix per thread mmap setup
The PERF_EVENT_IOC_SET_OUTPUT ioctl was returning -EINVAL when using
--pid when monitoring multithreaded apps, as we can only share a ring
buffer for events on the same thread if not doing per cpu.
Fix it by using per thread ring buffers.
Tested with:
[root@felicio ~]# tuna -t 26131 -CP | nl
1 thread ctxt_switches
2 pid SCHED_ rtpri affinity voluntary nonvoluntary cmd
3 26131 OTHER 0 0,1 10814276 2397830 chromium-browse
4 642 OTHER 0 0,1 14688 0 chromium-browse
5 26148 OTHER 0 0,1 713602 115479 chromium-browse
6 26149 OTHER 0 0,1 801958 2262 chromium-browse
7 26150 OTHER 0 0,1 1271128 248 chromium-browse
8 26151 OTHER 0 0,1 3 0 chromium-browse
9 27049 OTHER 0 0,1 36796 9 chromium-browse
10 618 OTHER 0 0,1 14711 0 chromium-browse
11 661 OTHER 0 0,1 14593 0 chromium-browse
12 29048 OTHER 0 0,1 28125 0 chromium-browse
13 26143 OTHER 0 0,1 2202789 781 chromium-browse
[root@felicio ~]#
So 11 threads under pid 26131, then:
[root@felicio ~]# perf record -F 50000 --pid 26131
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7fa4a2538000-7fa4a25b9000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
2 7fa4a25b9000-7fa4a263a000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
3 7fa4a263a000-7fa4a26bb000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
4 7fa4a26bb000-7fa4a273c000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
5 7fa4a273c000-7fa4a27bd000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
6 7fa4a27bd000-7fa4a283e000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
7 7fa4a283e000-7fa4a28bf000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
8 7fa4a28bf000-7fa4a2940000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
9 7fa4a2940000-7fa4a29c1000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
10 7fa4a29c1000-7fa4a2a42000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
11 7fa4a2a42000-7fa4a2ac3000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
11 mmaps, one per thread since we didn't specify any CPU list, so we need one
mmap per thread and:
[root@felicio ~]# perf record -F 50000 --pid 26131
^M
^C[ perf record: Woken up 79 times to write data ]
[ perf record: Captured and wrote 20.614 MB perf.data (~900639 samples) ]
[root@felicio ~]# perf report -D | grep PERF_RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort -n | uniq -c | sort -nr | nl
1 371310 26131
2 96516 26148
3 95694 26149
4 95203 26150
5 7291 26143
6 87 27049
7 76 661
8 60 29048
9 47 618
10 43 642
[root@felicio ~]#
Ok, one of the threads, 26151 was quiescent, so no samples there, but all the
others are there.
Then, if I specify one CPU:
[root@felicio ~]# perf record -F 50000 --pid 26131 --cpu 1
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.680 MB perf.data (~29730 samples) ]
[root@felicio ~]# perf report -D | grep PERF_RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort -n | uniq -c | sort -nr | nl
1 8444 26131
2 2584 26149
3 2518 26148
4 2324 26150
5 123 26143
6 9 661
7 9 29048
[root@felicio ~]#
This machine has two cores, so fewer threads appeared on the radar, and:
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7f484b922000-7f484b9a3000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
Just one mmap, as now we can use just one per-cpu buffer instead of the
per-thread needed in the previous case.
For global profiling:
[root@felicio ~]# perf record -F 50000 -a
^C[ perf record: Woken up 26 times to write data ]
[ perf record: Captured and wrote 7.128 MB perf.data (~311412 samples) ]
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7fb49b435000-7fb49b4b6000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
2 7fb49b4b6000-7fb49b537000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
It uses per-cpu buffers.
For just one thread:
[root@felicio ~]# perf record -F 50000 --tid 26148
^C[ perf record: Woken up 2 times to write data ]
[ perf record: Captured and wrote 0.330 MB perf.data (~14426 samples) ]
[root@felicio ~]# perf report -D | grep PERF_RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort -n | uniq -c | sort -nr | nl
1 9969 26148
[root@felicio ~]#
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7f286a51b000-7f286a59c000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
Tested-by: David Ahern <dsahern@gmail.com>
Tested-by: Lin Ming <ming.m.lin@intel.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Link: http://lkml.kernel.org/r/20110426204401.GB1746@ghostprotocols.net
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2011-05-15 20:39:00 +08:00
|
|
|
|
2019-08-16 22:19:55 +08:00
|
|
|
void evlist__munmap(struct evlist *evlist)
|
2016-07-14 16:34:38 +08:00
|
|
|
{
|
2019-08-16 22:19:55 +08:00
|
|
|
evlist__munmap_nofree(evlist);
|
2013-12-27 04:41:15 +08:00
|
|
|
zfree(&evlist->mmap);
|
2017-12-05 00:51:07 +08:00
|
|
|
zfree(&evlist->overwrite_mmap);
|
2011-01-30 20:46:46 +08:00
|
|
|
}
|
|
|
|
|
2019-10-07 20:53:15 +08:00
|
|
|
static void perf_mmap__unmap_cb(struct perf_mmap *map)
|
|
|
|
{
|
|
|
|
struct mmap *m = container_of(map, struct mmap, core);
|
|
|
|
|
|
|
|
mmap__munmap(m);
|
|
|
|
}
|
|
|
|
|
2019-08-16 22:21:46 +08:00
|
|
|
static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
|
|
|
|
bool overwrite)
|
2011-01-30 20:46:46 +08:00
|
|
|
{
|
2016-05-21 00:38:24 +08:00
|
|
|
int i;
|
2019-07-28 02:30:53 +08:00
|
|
|
struct mmap *map;
|
2016-05-21 00:38:24 +08:00
|
|
|
|
2019-07-30 19:04:59 +08:00
|
|
|
map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
|
2016-07-14 16:34:35 +08:00
|
|
|
if (!map)
|
|
|
|
return NULL;
|
2016-05-31 21:06:15 +08:00
|
|
|
|
2019-07-30 19:04:59 +08:00
|
|
|
for (i = 0; i < evlist->core.nr_mmaps; i++) {
|
2019-10-17 18:59:09 +08:00
|
|
|
struct perf_mmap *prev = i ? &map[i - 1].core : NULL;
|
|
|
|
|
2017-02-24 00:24:34 +08:00
|
|
|
/*
|
|
|
|
* When the perf_mmap() call is made we grab one refcount, plus
|
2018-03-02 07:09:11 +08:00
|
|
|
* one extra to let perf_mmap__consume() get the last
|
2017-02-24 00:24:34 +08:00
|
|
|
* events after all real references (perf_mmap__get()) are
|
|
|
|
* dropped.
|
|
|
|
*
|
|
|
|
* Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
|
|
|
|
* thus does perf_mmap__get() on it.
|
|
|
|
*/
|
2019-10-17 18:59:09 +08:00
|
|
|
perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
|
2017-02-24 00:24:34 +08:00
|
|
|
}
|
2019-10-07 20:53:09 +08:00
|
|
|
|
2016-07-14 16:34:35 +08:00
|
|
|
return map;
|
2011-01-30 20:46:46 +08:00
|
|
|
}
|
|
|
|
|
2019-10-07 20:53:26 +08:00
|
|
|
static void
|
|
|
|
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
|
|
|
|
struct perf_mmap_param *_mp,
|
|
|
|
int idx, bool per_cpu)
|
|
|
|
{
|
|
|
|
struct evlist *evlist = container_of(_evlist, struct evlist, core);
|
|
|
|
struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
|
|
|
|
|
|
|
|
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
|
|
|
|
}
|
|
|
|
|
2019-10-07 20:53:27 +08:00
|
|
|
static struct perf_mmap*
|
|
|
|
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
|
|
|
|
{
|
|
|
|
struct evlist *evlist = container_of(_evlist, struct evlist, core);
|
2019-10-17 18:59:10 +08:00
|
|
|
struct mmap *maps;
|
2019-10-07 20:53:27 +08:00
|
|
|
|
2019-10-17 18:59:10 +08:00
|
|
|
maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
|
2019-10-07 20:53:27 +08:00
|
|
|
|
2019-10-17 18:59:10 +08:00
|
|
|
if (!maps) {
|
|
|
|
maps = evlist__alloc_mmap(evlist, overwrite);
|
|
|
|
if (!maps)
|
|
|
|
return NULL;
|
2019-10-07 20:53:27 +08:00
|
|
|
|
2019-10-17 18:59:10 +08:00
|
|
|
if (overwrite) {
|
2019-10-07 20:53:27 +08:00
|
|
|
evlist->overwrite_mmap = maps;
|
|
|
|
if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
|
2020-11-30 20:33:55 +08:00
|
|
|
evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
|
2019-10-17 18:59:10 +08:00
|
|
|
} else {
|
|
|
|
evlist->mmap = maps;
|
2019-10-07 20:53:27 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &maps[idx].core;
|
|
|
|
}
|
|
|
|
|
2019-10-07 20:53:28 +08:00
|
|
|
static int
|
|
|
|
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
|
|
|
|
int output, int cpu)
|
|
|
|
{
|
|
|
|
struct mmap *map = container_of(_map, struct mmap, core);
|
|
|
|
struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
|
|
|
|
|
|
|
|
return mmap__mmap(map, mp, output, cpu);
|
|
|
|
}
|
|
|
|
|
2016-04-16 04:46:31 +08:00
|
|
|
unsigned long perf_event_mlock_kb_in_pages(void)
|
2013-09-01 18:36:12 +08:00
|
|
|
{
|
2016-04-16 04:46:31 +08:00
|
|
|
unsigned long pages;
|
|
|
|
int max;
|
2014-12-13 04:46:45 +08:00
|
|
|
|
2016-04-16 04:46:31 +08:00
|
|
|
if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
|
|
|
|
/*
|
|
|
|
* Pick a once upon a time good value, i.e. things look
|
|
|
|
* strange since we can't read a sysctl value, but lets not
|
|
|
|
* die yet...
|
|
|
|
*/
|
|
|
|
max = 512;
|
|
|
|
} else {
|
|
|
|
max -= (page_size / 1024);
|
|
|
|
}
|
2014-12-13 04:46:45 +08:00
|
|
|
|
2016-04-16 04:46:31 +08:00
|
|
|
pages = (max * 1024) / page_size;
|
|
|
|
if (!is_power_of_2(pages))
|
|
|
|
pages = rounddown_pow_of_two(pages);
|
|
|
|
|
|
|
|
return pages;
|
|
|
|
}
|
|
|
|
|
2019-07-28 18:45:35 +08:00
|
|
|
size_t evlist__mmap_size(unsigned long pages)
|
2016-04-16 04:46:31 +08:00
|
|
|
{
|
|
|
|
if (pages == UINT_MAX)
|
|
|
|
pages = perf_event_mlock_kb_in_pages();
|
|
|
|
else if (!is_power_of_2(pages))
|
2013-09-01 18:36:12 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
return (pages + 1) * page_size;
|
|
|
|
}
|
|
|
|
|
2013-11-12 22:46:55 +08:00
|
|
|
static long parse_pages_arg(const char *str, unsigned long min,
|
|
|
|
unsigned long max)
|
2013-09-01 18:36:12 +08:00
|
|
|
{
|
2013-10-22 15:34:18 +08:00
|
|
|
unsigned long pages, val;
|
2013-09-01 18:36:13 +08:00
|
|
|
static struct parse_tag tags[] = {
|
|
|
|
{ .tag = 'B', .mult = 1 },
|
|
|
|
{ .tag = 'K', .mult = 1 << 10 },
|
|
|
|
{ .tag = 'M', .mult = 1 << 20 },
|
|
|
|
{ .tag = 'G', .mult = 1 << 30 },
|
|
|
|
{ .tag = 0 },
|
|
|
|
};
|
2013-09-01 18:36:12 +08:00
|
|
|
|
2013-11-12 22:46:53 +08:00
|
|
|
if (str == NULL)
|
2013-11-12 22:46:55 +08:00
|
|
|
return -EINVAL;
|
2013-11-12 22:46:53 +08:00
|
|
|
|
2013-09-01 18:36:13 +08:00
|
|
|
val = parse_tag_value(str, tags);
|
2013-10-22 15:34:18 +08:00
|
|
|
if (val != (unsigned long) -1) {
|
2013-09-01 18:36:13 +08:00
|
|
|
/* we got file size value */
|
|
|
|
pages = PERF_ALIGN(val, page_size) / page_size;
|
|
|
|
} else {
|
|
|
|
/* we got pages count value */
|
|
|
|
char *eptr;
|
|
|
|
pages = strtoul(str, &eptr, 10);
|
2013-11-12 22:46:55 +08:00
|
|
|
if (*eptr != '\0')
|
|
|
|
return -EINVAL;
|
2013-09-01 18:36:12 +08:00
|
|
|
}
|
|
|
|
|
2013-12-09 21:18:37 +08:00
|
|
|
if (pages == 0 && min == 0) {
|
2013-11-12 22:46:55 +08:00
|
|
|
/* leave number of pages at 0 */
|
2013-12-09 21:18:39 +08:00
|
|
|
} else if (!is_power_of_2(pages)) {
|
2017-01-09 17:51:55 +08:00
|
|
|
char buf[100];
|
|
|
|
|
2013-11-12 22:46:55 +08:00
|
|
|
/* round pages up to next power of 2 */
|
2014-12-17 00:24:41 +08:00
|
|
|
pages = roundup_pow_of_two(pages);
|
2013-12-09 21:18:39 +08:00
|
|
|
if (!pages)
|
|
|
|
return -EINVAL;
|
2017-01-09 17:51:55 +08:00
|
|
|
|
|
|
|
unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
|
|
|
|
pr_info("rounding mmap pages size to %s (%lu pages)\n",
|
|
|
|
buf, pages);
|
2013-10-22 15:34:18 +08:00
|
|
|
}
|
|
|
|
|
2013-11-12 22:46:55 +08:00
|
|
|
if (pages > max)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return pages;
|
|
|
|
}
|
|
|
|
|
2015-04-09 23:53:46 +08:00
|
|
|
int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
|
2013-11-12 22:46:55 +08:00
|
|
|
{
|
|
|
|
unsigned long max = UINT_MAX;
|
|
|
|
long pages;
|
|
|
|
|
2013-12-09 21:18:38 +08:00
|
|
|
if (max > SIZE_MAX / page_size)
|
2013-11-12 22:46:55 +08:00
|
|
|
max = SIZE_MAX / page_size;
|
|
|
|
|
|
|
|
pages = parse_pages_arg(str, 1, max);
|
|
|
|
if (pages < 0) {
|
|
|
|
pr_err("Invalid argument for --mmap_pages/-m\n");
|
2013-09-01 18:36:12 +08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
*mmap_pages = pages;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-04-09 23:53:46 +08:00
|
|
|
int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
|
|
|
|
int unset __maybe_unused)
|
|
|
|
{
|
|
|
|
return __perf_evlist__parse_mmap_pages(opt->value, str);
|
|
|
|
}
|
|
|
|
|
2013-10-18 20:29:12 +08:00
|
|
|
/**
|
2019-07-28 18:45:35 +08:00
|
|
|
* evlist__mmap_ex - Create mmaps to receive events.
|
2013-10-18 20:29:12 +08:00
|
|
|
* @evlist: list of events
|
|
|
|
* @pages: map length in pages
|
|
|
|
* @overwrite: overwrite older events?
|
2015-04-09 23:53:42 +08:00
|
|
|
* @auxtrace_pages - auxtrace map length in pages
|
|
|
|
* @auxtrace_overwrite - overwrite older auxtrace data?
|
2011-01-30 20:46:46 +08:00
|
|
|
*
|
2013-10-18 20:29:12 +08:00
|
|
|
* If @overwrite is %false the user needs to signal event consumption using
|
2019-07-28 18:45:35 +08:00
|
|
|
* perf_mmap__write_tail(). Using evlist__mmap_read() does this
|
2013-10-18 20:29:12 +08:00
|
|
|
* automatically.
|
2011-01-30 21:59:43 +08:00
|
|
|
*
|
2015-04-09 23:53:42 +08:00
|
|
|
* Similarly, if @auxtrace_overwrite is %false the user needs to signal data
|
|
|
|
* consumption using auxtrace_mmap__write_tail().
|
|
|
|
*
|
2013-10-18 20:29:12 +08:00
|
|
|
* Return: %0 on success, negative error code otherwise.
|
2011-01-30 20:46:46 +08:00
|
|
|
*/
|
2019-07-28 18:45:35 +08:00
|
|
|
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
|
2017-12-03 10:00:38 +08:00
|
|
|
unsigned int auxtrace_pages,
|
2019-03-19 01:42:19 +08:00
|
|
|
bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
|
|
|
|
int comp_level)
|
2011-01-30 20:46:46 +08:00
|
|
|
{
|
perf mmap: Fix perf backward recording
'perf record' backward recording doesn't work as we expected: it never
overwrites when ring buffer gets full.
Test:
Run a busy python printing task background like this:
while True:
print 123
send SIGUSR2 to perf to capture snapshot, then:
# ./perf record --overwrite -e raw_syscalls:sys_enter -e raw_syscalls:sys_exit --exclude-perf -a --switch-output
[ perf record: dump data: Woken up 1 times ]
[ perf record: Dump perf.data.2017110101520743 ]
[ perf record: dump data: Woken up 1 times ]
[ perf record: Dump perf.data.2017110101521251 ]
[ perf record: dump data: Woken up 1 times ]
[ perf record: Dump perf.data.2017110101521692 ]
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Dump perf.data.2017110101521936 ]
[ perf record: Captured and wrote 0.826 MB perf.data.<timestamp> ]
# ./perf script -i ./perf.data.2017110101520743 | head -n3
perf 2717 [000] 12449.310785: raw_syscalls:sys_enter: NR 16 (5, 2400, 0, 59, 100, 0)
perf 2717 [000] 12449.310790: raw_syscalls:sys_enter: NR 7 (4112340, 2, ffffffff, 3df, 100, 0)
python 2545 [000] 12449.310800: raw_syscalls:sys_exit: NR 1 = 4
# ./perf script -i ./perf.data.2017110101521251 | head -n3
perf 2717 [000] 12449.310785: raw_syscalls:sys_enter: NR 16 (5, 2400, 0, 59, 100, 0)
perf 2717 [000] 12449.310790: raw_syscalls:sys_enter: NR 7 (4112340, 2, ffffffff, 3df, 100, 0)
python 2545 [000] 12449.310800: raw_syscalls:sys_exit: NR 1 = 4
# ./perf script -i ./perf.data.2017110101521692 | head -n3
perf 2717 [000] 12449.310785: raw_syscalls:sys_enter: NR 16 (5, 2400, 0, 59, 100, 0)
perf 2717 [000] 12449.310790: raw_syscalls:sys_enter: NR 7 (4112340, 2, ffffffff, 3df, 100, 0)
python 2545 [000] 12449.310800: raw_syscalls:sys_exit: NR 1 = 4
Timestamps never change, but my background task is a dead loop, can
easily overwhelm the ring buffer.
This patch fixes it by forcing unsetting PROT_WRITE for a backward ring
buffer, so all backward ring buffers become overwrite ring buffers.
Test result:
# ./perf record --overwrite -e raw_syscalls:sys_enter -e raw_syscalls:sys_exit --exclude-perf -a --switch-output
[ perf record: dump data: Woken up 1 times ]
[ perf record: Dump perf.data.2017110101285323 ]
[ perf record: dump data: Woken up 1 times ]
[ perf record: Dump perf.data.2017110101290053 ]
[ perf record: dump data: Woken up 1 times ]
[ perf record: Dump perf.data.2017110101290446 ]
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Dump perf.data.2017110101290837 ]
[ perf record: Captured and wrote 0.826 MB perf.data.<timestamp> ]
# ./perf script -i ./perf.data.2017110101285323 | head -n3
python 2545 [000] 11064.268083: raw_syscalls:sys_exit: NR 1 = 4
python 2545 [000] 11064.268084: raw_syscalls:sys_enter: NR 1 (1, 12cc330, 4, 7fc237280370, 7fc2373d0700, 2c7b0)
python 2545 [000] 11064.268086: raw_syscalls:sys_exit: NR 1 = 4
# ./perf script -i ./perf.data.2017110101290 | head -n3
failed to open ./perf.data.2017110101290: No such file or directory
# ./perf script -i ./perf.data.2017110101290053 | head -n3
python 2545 [000] 11071.564062: raw_syscalls:sys_enter: NR 1 (1, 12cc330, 4, 7fc237280370, 7fc2373d0700, 2c7b0)
python 2545 [000] 11071.564064: raw_syscalls:sys_exit: NR 1 = 4
python 2545 [000] 11071.564066: raw_syscalls:sys_enter: NR 1 (1, 12cc330, 4, 7fc237280370, 7fc2373d0700, 2c7b0)
# ./perf script -i ./perf.data.2017110101290 | head -n3
perf.data.2017110101290053 perf.data.2017110101290446 perf.data.2017110101290837
# ./perf script -i ./perf.data.2017110101290446 | head -n3
sshd 1321 [000] 11075.499473: raw_syscalls:sys_exit: NR 14 = 0
sshd 1321 [000] 11075.499474: raw_syscalls:sys_enter: NR 14 (2, 7ffe98899490, 0, 8, 0, 3000)
sshd 1321 [000] 11075.499474: raw_syscalls:sys_exit: NR 14 = 0
# ./perf script -i ./perf.data.2017110101290837 | head -n3
python 2545 [000] 11079.280844: raw_syscalls:sys_exit: NR 1 = 4
python 2545 [000] 11079.280847: raw_syscalls:sys_enter: NR 1 (1, 12cc330, 4, 7fc237280370, 7fc2373d0700, 2c7b0)
python 2545 [000] 11079.280850: raw_syscalls:sys_exit: NR 1 = 4
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Mengting Zhang <zhangmengting@huawei.com>
Link: http://lkml.kernel.org/r/20171204165107.95327-2-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2017-12-05 00:51:05 +08:00
|
|
|
/*
|
|
|
|
* Delay setting mp.prot: set it before calling perf_mmap__mmap.
|
|
|
|
* Its value is decided by evsel's write_backward.
|
|
|
|
* So &mp should not be passed through const pointer.
|
|
|
|
*/
|
2019-10-07 20:53:10 +08:00
|
|
|
struct mmap_params mp = {
|
|
|
|
.nr_cblocks = nr_cblocks,
|
|
|
|
.affinity = affinity,
|
|
|
|
.flush = flush,
|
|
|
|
.comp_level = comp_level
|
|
|
|
};
|
2019-10-07 20:53:29 +08:00
|
|
|
struct perf_evlist_mmap_ops ops = {
|
2019-10-07 20:53:28 +08:00
|
|
|
.idx = perf_evlist__mmap_cb_idx,
|
|
|
|
.get = perf_evlist__mmap_cb_get,
|
|
|
|
.mmap = perf_evlist__mmap_cb_mmap,
|
2019-10-07 20:53:26 +08:00
|
|
|
};
|
2011-11-09 19:10:47 +08:00
|
|
|
|
2019-08-06 21:14:05 +08:00
|
|
|
evlist->core.mmap_len = evlist__mmap_size(pages);
|
|
|
|
pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
|
2011-01-30 20:46:46 +08:00
|
|
|
|
2019-08-06 21:14:05 +08:00
|
|
|
auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
|
2015-04-09 23:53:42 +08:00
|
|
|
auxtrace_pages, auxtrace_overwrite);
|
|
|
|
|
2019-10-07 20:53:29 +08:00
|
|
|
return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
|
2011-01-30 20:46:46 +08:00
|
|
|
}
|
2011-01-30 21:59:43 +08:00
|
|
|
|
2019-07-28 18:45:35 +08:00
|
|
|
int evlist__mmap(struct evlist *evlist, unsigned int pages)
|
2015-04-09 23:53:42 +08:00
|
|
|
{
|
2019-07-28 18:45:35 +08:00
|
|
|
return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
|
2015-04-09 23:53:42 +08:00
|
|
|
}
|
|
|
|
|
2020-12-01 01:56:52 +08:00
|
|
|
int evlist__create_maps(struct evlist *evlist, struct target *target)
|
2011-01-30 21:59:43 +08:00
|
|
|
{
|
2018-02-13 04:32:36 +08:00
|
|
|
bool all_threads = (target->per_thread && target->system_wide);
|
2019-07-21 19:23:49 +08:00
|
|
|
struct perf_cpu_map *cpus;
|
2019-07-21 19:23:50 +08:00
|
|
|
struct perf_thread_map *threads;
|
2011-01-30 21:59:43 +08:00
|
|
|
|
2018-02-13 04:32:36 +08:00
|
|
|
/*
|
|
|
|
* If specify '-a' and '--per-thread' to perf record, perf record
|
|
|
|
* will override '--per-thread'. target->per_thread = false and
|
|
|
|
* target->system_wide = true.
|
|
|
|
*
|
|
|
|
* If specify '--per-thread' only to perf record,
|
|
|
|
* target->per_thread = true and target->system_wide = false.
|
|
|
|
*
|
|
|
|
* So target->per_thread && target->system_wide is false.
|
|
|
|
* For perf record, thread_map__new_str doesn't call
|
|
|
|
* thread_map__new_all_cpus. That will keep perf record's
|
|
|
|
* current behavior.
|
|
|
|
*
|
|
|
|
* For perf stat, it allows the case that target->per_thread and
|
|
|
|
* target->system_wide are all true. It means to collect system-wide
|
|
|
|
* per-thread data. thread_map__new_str will call
|
|
|
|
* thread_map__new_all_cpus to enumerate all threads.
|
|
|
|
*/
|
2017-12-05 22:03:09 +08:00
|
|
|
threads = thread_map__new_str(target->pid, target->tid, target->uid,
|
2018-02-13 04:32:36 +08:00
|
|
|
all_threads);
|
2011-01-30 21:59:43 +08:00
|
|
|
|
2015-09-08 15:58:57 +08:00
|
|
|
if (!threads)
|
2011-01-30 21:59:43 +08:00
|
|
|
return -1;
|
|
|
|
|
2013-12-05 06:56:40 +08:00
|
|
|
if (target__uses_dummy_map(target))
|
2019-07-21 19:24:16 +08:00
|
|
|
cpus = perf_cpu_map__dummy_new();
|
2012-05-16 17:45:48 +08:00
|
|
|
else
|
2019-07-21 19:24:30 +08:00
|
|
|
cpus = perf_cpu_map__new(target->cpu_list);
|
2011-01-30 21:59:43 +08:00
|
|
|
|
2015-09-08 15:58:57 +08:00
|
|
|
if (!cpus)
|
2011-01-30 21:59:43 +08:00
|
|
|
goto out_delete_threads;
|
|
|
|
|
2019-07-21 19:24:40 +08:00
|
|
|
evlist->core.has_user_cpus = !!target->cpu_list;
|
2015-09-08 15:58:52 +08:00
|
|
|
|
2019-07-21 19:24:43 +08:00
|
|
|
perf_evlist__set_maps(&evlist->core, cpus, threads);
|
2015-09-08 15:58:51 +08:00
|
|
|
|
2020-09-15 11:18:11 +08:00
|
|
|
/* as evlist now has references, put count here */
|
|
|
|
perf_cpu_map__put(cpus);
|
|
|
|
perf_thread_map__put(threads);
|
|
|
|
|
2015-09-08 15:58:51 +08:00
|
|
|
return 0;
|
2011-01-30 21:59:43 +08:00
|
|
|
|
|
|
|
out_delete_threads:
|
2019-07-21 19:24:20 +08:00
|
|
|
perf_thread_map__put(threads);
|
2011-01-30 21:59:43 +08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
|
2011-02-26 11:51:54 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2012-09-27 02:07:39 +08:00
|
|
|
int err = 0;
|
2011-02-26 11:51:54 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2012-09-27 02:07:39 +08:00
|
|
|
if (evsel->filter == NULL)
|
2011-02-26 11:51:54 +08:00
|
|
|
continue;
|
2012-09-27 02:07:39 +08:00
|
|
|
|
2015-08-21 14:23:14 +08:00
|
|
|
/*
|
|
|
|
* filters only work for tracepoint event, which doesn't have cpu limit.
|
|
|
|
* So evlist and evsel should always be same.
|
|
|
|
*/
|
2019-07-21 19:24:52 +08:00
|
|
|
err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
|
2015-03-25 06:23:47 +08:00
|
|
|
if (err) {
|
|
|
|
*err_evsel = evsel;
|
2012-09-27 02:07:39 +08:00
|
|
|
break;
|
2015-03-25 06:23:47 +08:00
|
|
|
}
|
2011-02-26 11:51:54 +08:00
|
|
|
}
|
|
|
|
|
2012-09-27 02:07:39 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
int evlist__set_tp_filter(struct evlist *evlist, const char *filter)
|
2012-09-27 02:07:39 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2012-09-27 02:07:39 +08:00
|
|
|
int err = 0;
|
|
|
|
|
2019-10-08 03:43:03 +08:00
|
|
|
if (filter == NULL)
|
|
|
|
return -1;
|
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2019-07-21 19:24:29 +08:00
|
|
|
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
|
2016-02-26 17:31:53 +08:00
|
|
|
continue;
|
|
|
|
|
2020-04-30 03:19:05 +08:00
|
|
|
err = evsel__set_filter(evsel, filter);
|
2012-09-27 02:07:39 +08:00
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
2011-02-26 11:51:54 +08:00
|
|
|
}
|
2011-05-21 23:49:00 +08:00
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
int evlist__append_tp_filter(struct evlist *evlist, const char *filter)
|
2019-10-08 03:52:17 +08:00
|
|
|
{
|
|
|
|
struct evsel *evsel;
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
if (filter == NULL)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
|
|
|
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
|
|
|
|
continue;
|
|
|
|
|
2020-04-30 03:19:05 +08:00
|
|
|
err = evsel__append_tp_filter(evsel, filter);
|
2019-10-08 03:52:17 +08:00
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2019-10-15 07:10:50 +08:00
|
|
|
/*
 * Build a tracepoint filter that excludes all @pids:
 *   "common_pid != p0 && common_pid != p1 && ..."
 *
 * Returns a malloc()ed string the caller must free(), or NULL on
 * allocation failure or when @npids is zero.
 *
 * Fix: @filter is now initialized to NULL; previously an @npids == 0
 * call returned an uninitialized pointer (undefined behavior).
 */
char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter = NULL;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}
|
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
/* Set a tracepoint filter that excludes the @npids pids in @pids. */
int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}
|
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
/* Single-pid convenience wrapper for evlist__set_tp_filter_pids(). */
int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__set_tp_filter_pids(evlist, 1, &pid);
}
|
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
/* Append a filter excluding the @npids pids to every tracepoint evsel. */
int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}
|
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
/* Single-pid convenience wrapper for evlist__append_tp_filter_pids(). */
int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__append_tp_filter_pids(evlist, 1, &pid);
}
|
|
|
|
|
2020-06-17 20:24:21 +08:00
|
|
|
bool evlist__valid_sample_type(struct evlist *evlist)
|
2011-05-21 23:49:00 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *pos;
|
2011-06-02 22:04:54 +08:00
|
|
|
|
2019-07-21 19:24:28 +08:00
|
|
|
if (evlist->core.nr_entries == 1)
|
2013-08-27 16:23:09 +08:00
|
|
|
return true;
|
|
|
|
|
|
|
|
if (evlist->id_pos < 0 || evlist->is_pos < 0)
|
|
|
|
return false;
|
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, pos) {
|
2013-08-27 16:23:09 +08:00
|
|
|
if (pos->id_pos != evlist->id_pos ||
|
|
|
|
pos->is_pos != evlist->is_pos)
|
2011-06-02 22:04:54 +08:00
|
|
|
return false;
|
2011-05-21 23:49:00 +08:00
|
|
|
}
|
|
|
|
|
2011-06-02 22:04:54 +08:00
|
|
|
return true;
|
2011-05-21 23:49:00 +08:00
|
|
|
}
|
|
|
|
|
2020-06-17 20:24:21 +08:00
|
|
|
u64 __evlist__combined_sample_type(struct evlist *evlist)
|
2011-06-02 22:04:54 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2013-08-27 16:23:09 +08:00
|
|
|
|
|
|
|
if (evlist->combined_sample_type)
|
|
|
|
return evlist->combined_sample_type;
|
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel)
|
2019-07-21 19:24:29 +08:00
|
|
|
evlist->combined_sample_type |= evsel->core.attr.sample_type;
|
2013-08-27 16:23:09 +08:00
|
|
|
|
|
|
|
return evlist->combined_sample_type;
|
|
|
|
}
|
|
|
|
|
2020-06-17 20:24:21 +08:00
|
|
|
u64 evlist__combined_sample_type(struct evlist *evlist)
|
2013-08-27 16:23:09 +08:00
|
|
|
{
|
|
|
|
evlist->combined_sample_type = 0;
|
2020-06-17 20:24:21 +08:00
|
|
|
return __evlist__combined_sample_type(evlist);
|
2011-06-02 22:04:54 +08:00
|
|
|
}
|
|
|
|
|
2020-06-17 20:31:25 +08:00
|
|
|
u64 evlist__combined_branch_type(struct evlist *evlist)
|
2015-07-18 23:24:47 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2015-07-18 23:24:47 +08:00
|
|
|
u64 branch_type = 0;
|
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel)
|
2019-07-21 19:24:29 +08:00
|
|
|
branch_type |= evsel->core.attr.branch_sample_type;
|
2015-07-18 23:24:47 +08:00
|
|
|
return branch_type;
|
|
|
|
}
|
|
|
|
|
2019-07-21 19:23:52 +08:00
|
|
|
bool perf_evlist__valid_read_format(struct evlist *evlist)
|
2012-10-10 23:38:13 +08:00
|
|
|
{
|
2019-09-03 16:39:52 +08:00
|
|
|
struct evsel *first = evlist__first(evlist), *pos = first;
|
2019-07-21 19:24:29 +08:00
|
|
|
u64 read_format = first->core.attr.read_format;
|
|
|
|
u64 sample_type = first->core.attr.sample_type;
|
2012-10-10 23:38:13 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, pos) {
|
2020-04-01 18:16:12 +08:00
|
|
|
if (read_format != pos->core.attr.read_format) {
|
|
|
|
pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
|
|
|
|
read_format, (u64)pos->core.attr.read_format);
|
|
|
|
}
|
2012-10-10 23:38:13 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* PERF_SAMPLE_READ imples PERF_FORMAT_ID. */
|
|
|
|
if ((sample_type & PERF_SAMPLE_READ) &&
|
|
|
|
!(read_format & PERF_FORMAT_ID)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-07-21 19:23:52 +08:00
|
|
|
u16 perf_evlist__id_hdr_size(struct evlist *evlist)
|
2011-11-12 08:28:50 +08:00
|
|
|
{
|
2019-09-03 16:39:52 +08:00
|
|
|
struct evsel *first = evlist__first(evlist);
|
2011-11-12 08:28:50 +08:00
|
|
|
struct perf_sample *data;
|
|
|
|
u64 sample_type;
|
|
|
|
u16 size = 0;
|
|
|
|
|
2019-07-21 19:24:29 +08:00
|
|
|
if (!first->core.attr.sample_id_all)
|
2011-11-12 08:28:50 +08:00
|
|
|
goto out;
|
|
|
|
|
2019-07-21 19:24:29 +08:00
|
|
|
sample_type = first->core.attr.sample_type;
|
2011-11-12 08:28:50 +08:00
|
|
|
|
|
|
|
if (sample_type & PERF_SAMPLE_TID)
|
|
|
|
size += sizeof(data->tid) * 2;
|
|
|
|
|
|
|
|
if (sample_type & PERF_SAMPLE_TIME)
|
|
|
|
size += sizeof(data->time);
|
|
|
|
|
|
|
|
if (sample_type & PERF_SAMPLE_ID)
|
|
|
|
size += sizeof(data->id);
|
|
|
|
|
|
|
|
if (sample_type & PERF_SAMPLE_STREAM_ID)
|
|
|
|
size += sizeof(data->stream_id);
|
|
|
|
|
|
|
|
if (sample_type & PERF_SAMPLE_CPU)
|
|
|
|
size += sizeof(data->cpu) * 2;
|
2013-08-27 16:23:09 +08:00
|
|
|
|
|
|
|
if (sample_type & PERF_SAMPLE_IDENTIFIER)
|
|
|
|
size += sizeof(data->id);
|
2011-11-12 08:28:50 +08:00
|
|
|
out:
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
2020-06-17 20:29:48 +08:00
|
|
|
bool evlist__valid_sample_id_all(struct evlist *evlist)
|
2011-05-21 23:49:00 +08:00
|
|
|
{
|
2019-09-03 16:39:52 +08:00
|
|
|
struct evsel *first = evlist__first(evlist), *pos = first;
|
2011-06-02 22:04:54 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry_continue(evlist, pos) {
|
2019-07-21 19:24:29 +08:00
|
|
|
if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
|
2011-06-02 22:04:54 +08:00
|
|
|
return false;
|
2011-05-21 23:49:00 +08:00
|
|
|
}
|
|
|
|
|
2011-06-02 22:04:54 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-06-17 20:29:48 +08:00
|
|
|
bool evlist__sample_id_all(struct evlist *evlist)
|
2011-06-02 22:04:54 +08:00
|
|
|
{
|
2019-09-03 16:39:52 +08:00
|
|
|
struct evsel *first = evlist__first(evlist);
|
2019-07-21 19:24:29 +08:00
|
|
|
return first->core.attr.sample_id_all;
|
2011-05-21 23:49:00 +08:00
|
|
|
}
|
2011-10-06 06:11:32 +08:00
|
|
|
|
2019-07-21 19:23:52 +08:00
|
|
|
/* Record @evsel as the evlist's currently selected event. */
void perf_evlist__set_selected(struct evlist *evlist,
			       struct evsel *evsel)
{
	evlist->selected = evsel;
}
|
2011-10-25 20:42:19 +08:00
|
|
|
|
2019-07-21 19:24:07 +08:00
|
|
|
/*
 * Close all event file descriptors in the evlist and free each evsel's
 * per-cpu/per-thread fd and id storage.
 */
void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct affinity affinity;
	int cpu, i;

	/*
	 * With perf record core.cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.cpus) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;
	/*
	 * Iterate CPU-major, pinning this thread to each CPU in turn and
	 * closing every evsel's fd for that CPU — presumably to keep the
	 * close() calls local to the CPU owning the event (NOTE(review):
	 * see affinity.h for the rationale; confirm).
	 */
	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry_reverse(evlist, evsel) {
			/* Skip evsels whose cpu map does not contain this cpu. */
			if (evsel__cpu_iter_skip(evsel, cpu))
				continue;
			/* cpu_iter was advanced by the skip check; -1 is this cpu's index. */
			perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	/* All fds are closed; release the fd and id arrays themselves. */
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
}
|
|
|
|
|
2020-12-01 01:56:52 +08:00
|
|
|
static int evlist__create_syswide_maps(struct evlist *evlist)
|
2014-10-11 02:55:15 +08:00
|
|
|
{
|
2019-07-21 19:23:49 +08:00
|
|
|
struct perf_cpu_map *cpus;
|
2019-07-21 19:23:50 +08:00
|
|
|
struct perf_thread_map *threads;
|
2014-10-11 02:55:15 +08:00
|
|
|
int err = -ENOMEM;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try reading /sys/devices/system/cpu/online to get
|
|
|
|
* an all cpus map.
|
|
|
|
*
|
|
|
|
* FIXME: -ENOMEM is the best we can do here, the cpu_map
|
|
|
|
* code needs an overhaul to properly forward the
|
|
|
|
* error, and we may not want to do that fallback to a
|
|
|
|
* default cpu identity map :-\
|
|
|
|
*/
|
2019-07-21 19:24:30 +08:00
|
|
|
cpus = perf_cpu_map__new(NULL);
|
2015-09-08 15:59:00 +08:00
|
|
|
if (!cpus)
|
2014-10-11 02:55:15 +08:00
|
|
|
goto out;
|
|
|
|
|
2019-07-21 19:24:19 +08:00
|
|
|
threads = perf_thread_map__new_dummy();
|
2015-09-08 15:59:00 +08:00
|
|
|
if (!threads)
|
|
|
|
goto out_put;
|
2014-10-11 02:55:15 +08:00
|
|
|
|
2019-07-21 19:24:43 +08:00
|
|
|
perf_evlist__set_maps(&evlist->core, cpus, threads);
|
2020-09-15 11:18:11 +08:00
|
|
|
|
|
|
|
perf_thread_map__put(threads);
|
2015-09-08 15:59:00 +08:00
|
|
|
out_put:
|
2019-07-21 19:24:17 +08:00
|
|
|
perf_cpu_map__put(cpus);
|
2020-09-15 11:18:11 +08:00
|
|
|
out:
|
|
|
|
return err;
|
2014-10-11 02:55:15 +08:00
|
|
|
}
|
|
|
|
|
2019-07-21 19:24:06 +08:00
|
|
|
int evlist__open(struct evlist *evlist)
|
2011-10-25 20:42:19 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2013-03-15 13:48:48 +08:00
|
|
|
int err;
|
2011-10-25 20:42:19 +08:00
|
|
|
|
2014-10-11 02:55:15 +08:00
|
|
|
/*
|
|
|
|
* Default: one fd per CPU, all threads, aka systemwide
|
|
|
|
* as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
|
|
|
|
*/
|
2019-07-21 19:24:42 +08:00
|
|
|
if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
|
2020-12-01 01:56:52 +08:00
|
|
|
err = evlist__create_syswide_maps(evlist);
|
2014-10-11 02:55:15 +08:00
|
|
|
if (err < 0)
|
|
|
|
goto out_err;
|
|
|
|
}
|
|
|
|
|
2020-12-01 01:44:40 +08:00
|
|
|
evlist__update_id_pos(evlist);
|
2013-09-07 03:40:11 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2019-07-21 19:24:39 +08:00
|
|
|
err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
|
2011-10-25 20:42:19 +08:00
|
|
|
if (err < 0)
|
|
|
|
goto out_err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
out_err:
|
2019-07-21 19:24:07 +08:00
|
|
|
evlist__close(evlist);
|
2012-02-23 11:13:36 +08:00
|
|
|
errno = -err;
|
2011-10-25 20:42:19 +08:00
|
|
|
return err;
|
|
|
|
}
|
2011-11-09 18:47:15 +08:00
|
|
|
|
2020-11-30 20:26:54 +08:00
|
|
|
/*
 * Fork the workload described by @argv but keep it "corked": the child
 * blocks on a pipe read until evlist__start_workload() writes one byte
 * (or aborts by closing the fd). A second pipe lets the parent wait for
 * the child to finish its setup before returning.
 *
 * @pipe_output: redirect the child's stdout to stderr (dup2(2, 1)).
 * @exec_error:  optional SIGUSR1 handler; when set, the child reports an
 *               execvp() failure to the parent via sigqueue(), carrying
 *               errno in sival_int.
 *
 * Returns 0 on success (workload.pid and workload.cork_fd are set),
 * -1 on failure with all pipe fds closed.
 */
int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[],
			     bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		/* Child side of the fork. */
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		/* Keep the read end open only until we exec the workload. */
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		/* Only reached if execvp() failed. */
		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	/* Parent side: install the exec-failure handler before the child can exec. */
	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		/* No explicit target: the workload child itself is what we trace. */
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
|
|
|
|
|
2020-11-30 20:26:54 +08:00
|
|
|
int evlist__start_workload(struct evlist *evlist)
|
2011-11-09 18:47:15 +08:00
|
|
|
{
|
|
|
|
if (evlist->workload.cork_fd > 0) {
|
2013-07-03 03:27:21 +08:00
|
|
|
char bf = 0;
|
2013-06-26 15:14:15 +08:00
|
|
|
int ret;
|
2011-11-09 18:47:15 +08:00
|
|
|
/*
|
|
|
|
* Remove the cork, let it rip!
|
|
|
|
*/
|
2013-06-26 15:14:15 +08:00
|
|
|
ret = write(evlist->workload.cork_fd, &bf, 1);
|
|
|
|
if (ret < 0)
|
2017-01-10 21:41:00 +08:00
|
|
|
perror("unable to write to pipe");
|
2013-06-26 15:14:15 +08:00
|
|
|
|
|
|
|
close(evlist->workload.cork_fd);
|
|
|
|
return ret;
|
2011-11-09 18:47:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2012-08-02 22:42:57 +08:00
|
|
|
|
2020-11-30 20:43:07 +08:00
|
|
|
/*
 * Decode @event into @sample using the evsel that produced it.
 * Returns -EFAULT when the event cannot be matched to an evsel.
 */
int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
	struct evsel *source = evlist__event2evsel(evlist, event);

	return source ? evsel__parse_sample(source, event, sample) : -EFAULT;
}
|
2012-09-07 01:54:11 +08:00
|
|
|
|
2020-11-30 20:43:07 +08:00
|
|
|
/*
 * Extract only the timestamp of @event into @timestamp, using the evsel
 * that produced it. Returns -EFAULT when no matching evsel is found.
 */
int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
{
	struct evsel *source = evlist__event2evsel(evlist, event);

	return source ? evsel__parse_sample_timestamp(source, event, timestamp) : -EFAULT;
}
|
|
|
|
|
2020-06-17 20:19:46 +08:00
|
|
|
int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
{
|
|
|
|
int printed, value;
|
tools: Introduce str_error_r()
The tools so far have been using the strerror_r() GNU variant, that
returns a string, be it the buffer passed or something else.
But that, besides being tricky in cases where we expect that the
function using strerror_r() returns the error formatted in a provided
buffer (we have to check if it returned something else and copy that
instead), breaks the build on systems not using glibc, like Alpine
Linux, where musl libc is used.
So, introduce yet another wrapper, str_error_r(), that has the GNU
interface, but uses the portable XSI variant of strerror_r(), so that
users rest asured that the provided buffer is used and it is what is
returned.
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-d4t42fnf48ytlk8rjxs822tf@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-07-06 22:56:20 +08:00
|
|
|
char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
|
|
|
|
switch (err) {
|
|
|
|
case EACCES:
|
|
|
|
case EPERM:
|
|
|
|
printed = scnprintf(buf, size,
|
|
|
|
"Error:\t%s.\n"
|
|
|
|
"Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
|
|
|
|
|
2013-12-11 20:36:23 +08:00
|
|
|
value = perf_event_paranoid();
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
|
|
|
|
printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
|
|
|
|
|
|
|
|
if (value >= 2) {
|
|
|
|
printed += scnprintf(buf + printed, size - printed,
|
|
|
|
"For your workloads it needs to be <= 1\nHint:\t");
|
|
|
|
}
|
|
|
|
printed += scnprintf(buf + printed, size - printed,
|
2014-06-11 04:18:54 +08:00
|
|
|
"For system wide tracing it needs to be set to -1.\n");
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
|
|
|
|
printed += scnprintf(buf + printed, size - printed,
|
2014-06-11 04:18:54 +08:00
|
|
|
"Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
|
|
|
|
"Hint:\tThe current value is %d.", value);
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
break;
|
2016-02-19 00:34:09 +08:00
|
|
|
case EINVAL: {
|
2019-09-03 16:39:52 +08:00
|
|
|
struct evsel *first = evlist__first(evlist);
|
2016-02-19 00:34:09 +08:00
|
|
|
int max_freq;
|
|
|
|
|
|
|
|
if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
|
|
|
|
goto out_default;
|
|
|
|
|
2019-07-21 19:24:29 +08:00
|
|
|
if (first->core.attr.sample_freq < (u64)max_freq)
|
2016-02-19 00:34:09 +08:00
|
|
|
goto out_default;
|
|
|
|
|
|
|
|
printed = scnprintf(buf, size,
|
|
|
|
"Error:\t%s.\n"
|
|
|
|
"Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
|
|
|
|
"Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
|
2019-07-21 19:24:29 +08:00
|
|
|
emsg, max_freq, first->core.attr.sample_freq);
|
2016-02-19 00:34:09 +08:00
|
|
|
break;
|
|
|
|
}
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
default:
|
2016-02-19 00:34:09 +08:00
|
|
|
out_default:
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
scnprintf(buf, size, "%s", emsg);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2013-12-11 20:36:35 +08:00
|
|
|
|
2020-06-17 20:19:46 +08:00
|
|
|
int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
|
2014-12-12 05:03:01 +08:00
|
|
|
{
|
tools: Introduce str_error_r()
The tools so far have been using the strerror_r() GNU variant, that
returns a string, be it the buffer passed or something else.
But that, besides being tricky in cases where we expect that the
function using strerror_r() returns the error formatted in a provided
buffer (we have to check if it returned something else and copy that
instead), breaks the build on systems not using glibc, like Alpine
Linux, where musl libc is used.
So, introduce yet another wrapper, str_error_r(), that has the GNU
interface, but uses the portable XSI variant of strerror_r(), so that
users rest asured that the provided buffer is used and it is what is
returned.
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-d4t42fnf48ytlk8rjxs822tf@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-07-06 22:56:20 +08:00
|
|
|
char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
|
2019-08-06 21:14:05 +08:00
|
|
|
int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;
|
2014-12-12 05:03:01 +08:00
|
|
|
|
|
|
|
switch (err) {
|
|
|
|
case EPERM:
|
2014-12-13 02:59:51 +08:00
|
|
|
sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
|
2014-12-13 03:25:33 +08:00
|
|
|
printed += scnprintf(buf + printed, size - printed,
|
|
|
|
"Error:\t%s.\n"
|
2014-12-12 05:03:01 +08:00
|
|
|
"Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
|
2014-12-13 03:25:33 +08:00
|
|
|
"Hint:\tTried using %zd kB.\n",
|
2014-12-13 02:59:51 +08:00
|
|
|
emsg, pages_max_per_user, pages_attempted);
|
2014-12-13 03:25:33 +08:00
|
|
|
|
|
|
|
if (pages_attempted >= pages_max_per_user) {
|
|
|
|
printed += scnprintf(buf + printed, size - printed,
|
|
|
|
"Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
|
|
|
|
pages_max_per_user + pages_attempted);
|
|
|
|
}
|
|
|
|
|
|
|
|
printed += scnprintf(buf + printed, size - printed,
|
|
|
|
"Hint:\tTry using a smaller -m/--mmap-pages value.");
|
2014-12-12 05:03:01 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
scnprintf(buf, size, "%s", emsg);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-12-01 01:52:44 +08:00
|
|
|
void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel)
|
2013-12-11 20:36:35 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel, *n;
|
2013-12-11 20:36:35 +08:00
|
|
|
LIST_HEAD(move);
|
|
|
|
|
2019-09-03 16:39:52 +08:00
|
|
|
if (move_evsel == evlist__first(evlist))
|
2013-12-11 20:36:35 +08:00
|
|
|
return;
|
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry_safe(evlist, n, evsel) {
|
2013-12-11 20:36:35 +08:00
|
|
|
if (evsel->leader == move_evsel->leader)
|
2019-07-21 19:24:22 +08:00
|
|
|
list_move_tail(&evsel->core.node, &move);
|
2013-12-11 20:36:35 +08:00
|
|
|
}
|
|
|
|
|
2019-07-21 19:24:23 +08:00
|
|
|
list_splice(&move, &evlist->core.entries);
|
2013-12-11 20:36:35 +08:00
|
|
|
}
|
2014-07-31 14:00:52 +08:00
|
|
|
|
2020-12-01 01:39:41 +08:00
|
|
|
struct evsel *evlist__get_tracking_event(struct evlist *evlist)
|
2020-06-29 17:19:51 +08:00
|
|
|
{
|
|
|
|
struct evsel *evsel;
|
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
|
|
|
if (evsel->tracking)
|
|
|
|
return evsel;
|
|
|
|
}
|
|
|
|
|
|
|
|
return evlist__first(evlist);
|
|
|
|
}
|
|
|
|
|
2020-12-01 01:39:41 +08:00
|
|
|
void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel)
|
2014-07-31 14:00:52 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2014-07-31 14:00:52 +08:00
|
|
|
|
|
|
|
if (tracking_evsel->tracking)
|
|
|
|
return;
|
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2014-07-31 14:00:52 +08:00
|
|
|
if (evsel != tracking_evsel)
|
|
|
|
evsel->tracking = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
tracking_evsel->tracking = true;
|
|
|
|
}
|
perf tools: Enable passing event to BPF object
A new syntax is added to the parser so that the user can access
predefined perf events in BPF objects.
After this patch, BPF programs for perf are finally able to utilize
bpf_perf_event_read() introduced in commit 35578d798400 ("bpf: Implement
function bpf_perf_event_read() that get the selected hardware PMU
counter").
Test result:
# cat test_bpf_map_2.c
/************************ BEGIN **************************/
#include <uapi/linux/bpf.h>
#define SEC(NAME) __attribute__((section(NAME), used))
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
};
static int (*trace_printk)(const char *fmt, int fmt_size, ...) =
(void *)BPF_FUNC_trace_printk;
static int (*get_smp_processor_id)(void) =
(void *)BPF_FUNC_get_smp_processor_id;
static int (*perf_event_read)(struct bpf_map_def *, int) =
(void *)BPF_FUNC_perf_event_read;
struct bpf_map_def SEC("maps") pmu_map = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = __NR_CPUS__,
};
SEC("func_write=sys_write")
int func_write(void *ctx)
{
unsigned long long val;
char fmt[] = "sys_write: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
SEC("func_write_return=sys_write%return")
int func_write_return(void *ctx)
{
unsigned long long val = 0;
char fmt[] = "sys_write_return: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
char _license[] SEC("license") = "GPL";
int _version SEC("version") = LINUX_VERSION_CODE;
/************************* END ***************************/
Normal case:
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.013 MB perf.data (7 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17066 [000] d... 938449.863301: : sys_write: pmu=1157327
ls-17066 [000] dN.. 938449.863342: : sys_write_return: pmu=1225218
ls-17066 [000] d... 938449.863349: : sys_write: pmu=1241922
ls-17066 [000] dN.. 938449.863369: : sys_write_return: pmu=1267445
Normal case (system wide):
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -a
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.811 MB perf.data (120 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep -v '18446744073709551594' | grep -v perf | head -n 20
[SNIP]
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
gmain-30828 [002] d... 2740551.068992: : sys_write: pmu=84373
gmain-30828 [002] d... 2740551.068992: : sys_write_return: pmu=87696
gmain-30828 [002] d... 2740551.068996: : sys_write: pmu=100658
gmain-30828 [002] d... 2740551.068997: : sys_write_return: pmu=102572
Error case 1:
# perf record -e './test_bpf_map_2.c' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.014 MB perf.data ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17115 [007] d... 2724279.665625: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665651: : sys_write_return: pmu=18446744073709551614
ls-17115 [007] d... 2724279.665658: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665677: : sys_write_return: pmu=18446744073709551614
(18446744073709551614 is 0xfffffffffffffffe (-2))
Error case 2:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=evt/' -a
event syntax error: '..ps:pmu_map.event=evt/'
\___ Event not found for map setting
Hint: Valid config terms:
map:[<arraymap>].value=[value]
map:[<eventmap>].event=[event]
[SNIP]
Error case 3:
# ls /proc/2348/task/
2348 2505 2506 2507 2508
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -p 2348
ERROR: Apply config to BPF failed: Cannot set event to BPF map in multi-thread tracing
Error case 4:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
ERROR: Apply config to BPF failed: Doesn't support inherit event (Hint: use -i to turn off inherit)
Error case 5:
# perf record -i -e raw_syscalls:sys_enter -e './test_bpf_map_2.c/map:pmu_map.event=raw_syscalls:sys_enter/' ls
ERROR: Apply config to BPF failed: Can only put raw, hardware and BPF output event into a BPF map
Error case 6:
# perf record -i -e './test_bpf_map_2.c/map:pmu_map.event=123/' ls /
event syntax error: '.._map.event=123/'
\___ Incorrect value type for map
[SNIP]
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Cody P Schafer <dev@codyps.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jeremie Galarneau <jeremie.galarneau@efficios.com>
Cc: Kirill Smelkov <kirr@nexedi.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1456132275-98875-7-git-send-email-wangnan0@huawei.com
Signed-off-by: He Kuang <hekuang@huawei.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-02-22 17:10:33 +08:00
|
|
|
|
2020-11-30 20:48:07 +08:00
|
|
|
struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
|
perf tools: Enable passing event to BPF object
A new syntax is added to the parser so that the user can access
predefined perf events in BPF objects.
After this patch, BPF programs for perf are finally able to utilize
bpf_perf_event_read() introduced in commit 35578d798400 ("bpf: Implement
function bpf_perf_event_read() that get the selected hardware PMU
counter").
Test result:
# cat test_bpf_map_2.c
/************************ BEGIN **************************/
#include <uapi/linux/bpf.h>
#define SEC(NAME) __attribute__((section(NAME), used))
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
};
static int (*trace_printk)(const char *fmt, int fmt_size, ...) =
(void *)BPF_FUNC_trace_printk;
static int (*get_smp_processor_id)(void) =
(void *)BPF_FUNC_get_smp_processor_id;
static int (*perf_event_read)(struct bpf_map_def *, int) =
(void *)BPF_FUNC_perf_event_read;
struct bpf_map_def SEC("maps") pmu_map = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = __NR_CPUS__,
};
SEC("func_write=sys_write")
int func_write(void *ctx)
{
unsigned long long val;
char fmt[] = "sys_write: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
SEC("func_write_return=sys_write%return")
int func_write_return(void *ctx)
{
unsigned long long val = 0;
char fmt[] = "sys_write_return: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
char _license[] SEC("license") = "GPL";
int _version SEC("version") = LINUX_VERSION_CODE;
/************************* END ***************************/
Normal case:
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.013 MB perf.data (7 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17066 [000] d... 938449.863301: : sys_write: pmu=1157327
ls-17066 [000] dN.. 938449.863342: : sys_write_return: pmu=1225218
ls-17066 [000] d... 938449.863349: : sys_write: pmu=1241922
ls-17066 [000] dN.. 938449.863369: : sys_write_return: pmu=1267445
Normal case (system wide):
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -a
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.811 MB perf.data (120 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep -v '18446744073709551594' | grep -v perf | head -n 20
[SNIP]
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
gmain-30828 [002] d... 2740551.068992: : sys_write: pmu=84373
gmain-30828 [002] d... 2740551.068992: : sys_write_return: pmu=87696
gmain-30828 [002] d... 2740551.068996: : sys_write: pmu=100658
gmain-30828 [002] d... 2740551.068997: : sys_write_return: pmu=102572
Error case 1:
# perf record -e './test_bpf_map_2.c' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.014 MB perf.data ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17115 [007] d... 2724279.665625: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665651: : sys_write_return: pmu=18446744073709551614
ls-17115 [007] d... 2724279.665658: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665677: : sys_write_return: pmu=18446744073709551614
(18446744073709551614 is 0xfffffffffffffffe (-2))
Error case 2:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=evt/' -a
event syntax error: '..ps:pmu_map.event=evt/'
\___ Event not found for map setting
Hint: Valid config terms:
map:[<arraymap>].value=[value]
map:[<eventmap>].event=[event]
[SNIP]
Error case 3:
# ls /proc/2348/task/
2348 2505 2506 2507 2508
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -p 2348
ERROR: Apply config to BPF failed: Cannot set event to BPF map in multi-thread tracing
Error case 4:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
ERROR: Apply config to BPF failed: Doesn't support inherit event (Hint: use -i to turn off inherit)
Error case 5:
# perf record -i -e raw_syscalls:sys_enter -e './test_bpf_map_2.c/map:pmu_map.event=raw_syscalls:sys_enter/' ls
ERROR: Apply config to BPF failed: Can only put raw, hardware and BPF output event into a BPF map
Error case 6:
# perf record -i -e './test_bpf_map_2.c/map:pmu_map.event=123/' ls /
event syntax error: '.._map.event=123/'
\___ Incorrect value type for map
[SNIP]
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Cody P Schafer <dev@codyps.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jeremie Galarneau <jeremie.galarneau@efficios.com>
Cc: Kirill Smelkov <kirr@nexedi.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1456132275-98875-7-git-send-email-wangnan0@huawei.com
Signed-off-by: He Kuang <hekuang@huawei.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-02-22 17:10:33 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
perf tools: Enable passing event to BPF object
A new syntax is added to the parser so that the user can access
predefined perf events in BPF objects.
After this patch, BPF programs for perf are finally able to utilize
bpf_perf_event_read() introduced in commit 35578d798400 ("bpf: Implement
function bpf_perf_event_read() that get the selected hardware PMU
counter").
Test result:
# cat test_bpf_map_2.c
/************************ BEGIN **************************/
#include <uapi/linux/bpf.h>
#define SEC(NAME) __attribute__((section(NAME), used))
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
};
static int (*trace_printk)(const char *fmt, int fmt_size, ...) =
(void *)BPF_FUNC_trace_printk;
static int (*get_smp_processor_id)(void) =
(void *)BPF_FUNC_get_smp_processor_id;
static int (*perf_event_read)(struct bpf_map_def *, int) =
(void *)BPF_FUNC_perf_event_read;
struct bpf_map_def SEC("maps") pmu_map = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = __NR_CPUS__,
};
SEC("func_write=sys_write")
int func_write(void *ctx)
{
unsigned long long val;
char fmt[] = "sys_write: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
SEC("func_write_return=sys_write%return")
int func_write_return(void *ctx)
{
unsigned long long val = 0;
char fmt[] = "sys_write_return: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
char _license[] SEC("license") = "GPL";
int _version SEC("version") = LINUX_VERSION_CODE;
/************************* END ***************************/
Normal case:
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.013 MB perf.data (7 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17066 [000] d... 938449.863301: : sys_write: pmu=1157327
ls-17066 [000] dN.. 938449.863342: : sys_write_return: pmu=1225218
ls-17066 [000] d... 938449.863349: : sys_write: pmu=1241922
ls-17066 [000] dN.. 938449.863369: : sys_write_return: pmu=1267445
Normal case (system wide):
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -a
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.811 MB perf.data (120 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep -v '18446744073709551594' | grep -v perf | head -n 20
[SNIP]
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
gmain-30828 [002] d... 2740551.068992: : sys_write: pmu=84373
gmain-30828 [002] d... 2740551.068992: : sys_write_return: pmu=87696
gmain-30828 [002] d... 2740551.068996: : sys_write: pmu=100658
gmain-30828 [002] d... 2740551.068997: : sys_write_return: pmu=102572
Error case 1:
# perf record -e './test_bpf_map_2.c' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.014 MB perf.data ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17115 [007] d... 2724279.665625: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665651: : sys_write_return: pmu=18446744073709551614
ls-17115 [007] d... 2724279.665658: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665677: : sys_write_return: pmu=18446744073709551614
(18446744073709551614 is 0xfffffffffffffffe (-2))
Error case 2:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=evt/' -a
event syntax error: '..ps:pmu_map.event=evt/'
\___ Event not found for map setting
Hint: Valid config terms:
map:[<arraymap>].value=[value]
map:[<eventmap>].event=[event]
[SNIP]
Error case 3:
# ls /proc/2348/task/
2348 2505 2506 2507 2508
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -p 2348
ERROR: Apply config to BPF failed: Cannot set event to BPF map in multi-thread tracing
Error case 4:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
ERROR: Apply config to BPF failed: Doesn't support inherit event (Hint: use -i to turn off inherit)
Error case 5:
# perf record -i -e raw_syscalls:sys_enter -e './test_bpf_map_2.c/map:pmu_map.event=raw_syscalls:sys_enter/' ls
ERROR: Apply config to BPF failed: Can only put raw, hardware and BPF output event into a BPF map
Error case 6:
# perf record -i -e './test_bpf_map_2.c/map:pmu_map.event=123/' ls /
event syntax error: '.._map.event=123/'
\___ Incorrect value type for map
[SNIP]
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Cody P Schafer <dev@codyps.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jeremie Galarneau <jeremie.galarneau@efficios.com>
Cc: Kirill Smelkov <kirr@nexedi.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1456132275-98875-7-git-send-email-wangnan0@huawei.com
Signed-off-by: He Kuang <hekuang@huawei.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-02-22 17:10:33 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
perf tools: Enable passing event to BPF object
A new syntax is added to the parser so that the user can access
predefined perf events in BPF objects.
After this patch, BPF programs for perf are finally able to utilize
bpf_perf_event_read() introduced in commit 35578d798400 ("bpf: Implement
function bpf_perf_event_read() that get the selected hardware PMU
counter").
Test result:
# cat test_bpf_map_2.c
/************************ BEGIN **************************/
#include <uapi/linux/bpf.h>
#define SEC(NAME) __attribute__((section(NAME), used))
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
};
static int (*trace_printk)(const char *fmt, int fmt_size, ...) =
(void *)BPF_FUNC_trace_printk;
static int (*get_smp_processor_id)(void) =
(void *)BPF_FUNC_get_smp_processor_id;
static int (*perf_event_read)(struct bpf_map_def *, int) =
(void *)BPF_FUNC_perf_event_read;
struct bpf_map_def SEC("maps") pmu_map = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = __NR_CPUS__,
};
SEC("func_write=sys_write")
int func_write(void *ctx)
{
unsigned long long val;
char fmt[] = "sys_write: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
SEC("func_write_return=sys_write%return")
int func_write_return(void *ctx)
{
unsigned long long val = 0;
char fmt[] = "sys_write_return: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
char _license[] SEC("license") = "GPL";
int _version SEC("version") = LINUX_VERSION_CODE;
/************************* END ***************************/
Normal case:
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.013 MB perf.data (7 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17066 [000] d... 938449.863301: : sys_write: pmu=1157327
ls-17066 [000] dN.. 938449.863342: : sys_write_return: pmu=1225218
ls-17066 [000] d... 938449.863349: : sys_write: pmu=1241922
ls-17066 [000] dN.. 938449.863369: : sys_write_return: pmu=1267445
Normal case (system wide):
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -a
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.811 MB perf.data (120 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep -v '18446744073709551594' | grep -v perf | head -n 20
[SNIP]
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
gmain-30828 [002] d... 2740551.068992: : sys_write: pmu=84373
gmain-30828 [002] d... 2740551.068992: : sys_write_return: pmu=87696
gmain-30828 [002] d... 2740551.068996: : sys_write: pmu=100658
gmain-30828 [002] d... 2740551.068997: : sys_write_return: pmu=102572
Error case 1:
# perf record -e './test_bpf_map_2.c' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.014 MB perf.data ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17115 [007] d... 2724279.665625: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665651: : sys_write_return: pmu=18446744073709551614
ls-17115 [007] d... 2724279.665658: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665677: : sys_write_return: pmu=18446744073709551614
(18446744073709551614 is 0xfffffffffffffffe (-2))
Error case 2:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=evt/' -a
event syntax error: '..ps:pmu_map.event=evt/'
\___ Event not found for map setting
Hint: Valid config terms:
map:[<arraymap>].value=[value]
map:[<eventmap>].event=[event]
[SNIP]
Error case 3:
# ls /proc/2348/task/
2348 2505 2506 2507 2508
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -p 2348
ERROR: Apply config to BPF failed: Cannot set event to BPF map in multi-thread tracing
Error case 4:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
ERROR: Apply config to BPF failed: Doesn't support inherit event (Hint: use -i to turn off inherit)
Error case 5:
# perf record -i -e raw_syscalls:sys_enter -e './test_bpf_map_2.c/map:pmu_map.event=raw_syscalls:sys_enter/' ls
ERROR: Apply config to BPF failed: Can only put raw, hardware and BPF output event into a BPF map
Error case 6:
# perf record -i -e './test_bpf_map_2.c/map:pmu_map.event=123/' ls /
event syntax error: '.._map.event=123/'
\___ Incorrect value type for map
[SNIP]
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Cody P Schafer <dev@codyps.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jeremie Galarneau <jeremie.galarneau@efficios.com>
Cc: Kirill Smelkov <kirr@nexedi.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1456132275-98875-7-git-send-email-wangnan0@huawei.com
Signed-off-by: He Kuang <hekuang@huawei.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-02-22 17:10:33 +08:00
|
|
|
if (!evsel->name)
|
|
|
|
continue;
|
|
|
|
if (strcmp(str, evsel->name) == 0)
|
|
|
|
return evsel;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
2016-07-14 16:34:42 +08:00
|
|
|
|
2020-11-30 20:33:55 +08:00
|
|
|
void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state)
|
2016-07-14 16:34:42 +08:00
|
|
|
{
|
|
|
|
enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
|
|
|
|
enum action {
|
|
|
|
NONE,
|
|
|
|
PAUSE,
|
|
|
|
RESUME,
|
|
|
|
} action = NONE;
|
|
|
|
|
2017-12-05 00:51:07 +08:00
|
|
|
if (!evlist->overwrite_mmap)
|
2016-07-14 16:34:42 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
switch (old_state) {
|
|
|
|
case BKW_MMAP_NOTREADY: {
|
|
|
|
if (state != BKW_MMAP_RUNNING)
|
2018-01-11 23:50:20 +08:00
|
|
|
goto state_err;
|
2016-07-14 16:34:42 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case BKW_MMAP_RUNNING: {
|
|
|
|
if (state != BKW_MMAP_DATA_PENDING)
|
|
|
|
goto state_err;
|
|
|
|
action = PAUSE;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case BKW_MMAP_DATA_PENDING: {
|
|
|
|
if (state != BKW_MMAP_EMPTY)
|
|
|
|
goto state_err;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case BKW_MMAP_EMPTY: {
|
|
|
|
if (state != BKW_MMAP_RUNNING)
|
|
|
|
goto state_err;
|
|
|
|
action = RESUME;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
WARN_ONCE(1, "Shouldn't get there\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
evlist->bkw_mmap_state = state;
|
|
|
|
|
|
|
|
switch (action) {
|
|
|
|
case PAUSE:
|
2020-12-01 01:49:05 +08:00
|
|
|
evlist__pause(evlist);
|
2016-07-14 16:34:42 +08:00
|
|
|
break;
|
|
|
|
case RESUME:
|
2020-12-01 01:49:05 +08:00
|
|
|
evlist__resume(evlist);
|
2016-07-14 16:34:42 +08:00
|
|
|
break;
|
|
|
|
case NONE:
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
state_err:
|
|
|
|
return;
|
|
|
|
}
|
2017-11-14 22:01:06 +08:00
|
|
|
|
2019-07-21 19:23:52 +08:00
|
|
|
bool perf_evlist__exclude_kernel(struct evlist *evlist)
|
2017-11-14 22:01:06 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2017-11-14 22:01:06 +08:00
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2019-07-21 19:24:29 +08:00
|
|
|
if (!evsel->core.attr.exclude_kernel)
|
2017-11-14 22:01:06 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
2018-05-21 22:57:44 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Events in data file are not collect in groups, but we still want
|
|
|
|
* the group display. Set the artificial group and set the leader's
|
|
|
|
* forced_leader flag to notify the display code.
|
|
|
|
*/
|
2020-12-01 01:58:32 +08:00
|
|
|
void evlist__force_leader(struct evlist *evlist)
|
2018-05-21 22:57:44 +08:00
|
|
|
{
|
|
|
|
if (!evlist->nr_groups) {
|
2019-09-03 16:39:52 +08:00
|
|
|
struct evsel *leader = evlist__first(evlist);
|
2018-05-21 22:57:44 +08:00
|
|
|
|
2020-11-30 20:22:07 +08:00
|
|
|
evlist__set_leader(evlist);
|
2018-05-21 22:57:44 +08:00
|
|
|
leader->forced_leader = true;
|
|
|
|
}
|
|
|
|
}
|
2018-10-02 03:59:26 +08:00
|
|
|
|
2020-12-01 01:58:32 +08:00
|
|
|
/*
 * A weak group failed to open at @evsel: dissolve the group so every member
 * can be retried standalone.  Members opened before @evsel are closed when
 * @close is set; all former members get reset_group so they are reopened.
 * Returns the old group leader.
 */
struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close)
{
	struct evsel *pos, *leader = evsel->leader;
	bool opened = true;

	pr_debug("Weak group for %s/%d failed\n",
		leader->name, leader->core.nr_members);

	/*
	 * Walk the whole list ourselves: for_each_group_member() skips the
	 * first entry (the leader), which must be reset here as well.
	 */
	evlist__for_each_entry(evsel_list, pos) {
		/* Everything from @evsel onwards never got opened. */
		if (pos == evsel)
			opened = false;
		if (pos->leader != leader)
			continue;
		if (opened && close)
			perf_evsel__close(&pos->core);
		pos->leader = pos;
		pos->core.nr_members = 0;
		/*
		 * Mark every former member of the group so it is
		 * reopened on its own.
		 */
		pos->reset_group = true;
	}
	return leader;
}
|
2020-07-17 15:01:33 +08:00
|
|
|
|
2020-09-02 18:57:07 +08:00
|
|
|
static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
|
|
|
|
{
|
|
|
|
char *s, *p;
|
|
|
|
int ret = 0, fd;
|
|
|
|
|
|
|
|
if (strncmp(str, "fifo:", 5))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
str += 5;
|
|
|
|
if (!*str || *str == ',')
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
s = strdup(str);
|
|
|
|
if (!s)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
p = strchr(s, ',');
|
|
|
|
if (p)
|
|
|
|
*p = '\0';
|
|
|
|
|
|
|
|
/*
|
|
|
|
* O_RDWR avoids POLLHUPs which is necessary to allow the other
|
|
|
|
* end of a FIFO to be repeatedly opened and closed.
|
|
|
|
*/
|
|
|
|
fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
|
|
|
|
if (fd < 0) {
|
|
|
|
pr_err("Failed to open '%s'\n", s);
|
|
|
|
ret = -errno;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
*ctl_fd = fd;
|
|
|
|
*ctl_fd_close = true;
|
|
|
|
|
|
|
|
if (p && *++p) {
|
|
|
|
/* O_RDWR | O_NONBLOCK means the other end need not be open */
|
|
|
|
fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
|
|
|
|
if (fd < 0) {
|
|
|
|
pr_err("Failed to open '%s'\n", p);
|
|
|
|
ret = -errno;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
*ctl_fd_ack = fd;
|
|
|
|
}
|
|
|
|
|
|
|
|
out_free:
|
|
|
|
free(s);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Parse a --control specification: either "fd:ctl-fd[,ack-fd]" with numeric
 * descriptors, or "fifo:..." (delegated to evlist__parse_control_fifo()).
 * Returns 0 on success, a negative error code on malformed input.
 */
int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *comma = NULL, *endptr = NULL;

	*ctl_fd_close = false;

	if (strncmp(str, "fd:", 3))
		return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);

	*ctl_fd = strtoul(&str[3], &endptr, 0);
	if (endptr == &str[3])
		return -EINVAL;

	comma = strchr(str, ',');
	if (comma) {
		/* The control fd number must run exactly up to the comma. */
		if (endptr != comma)
			return -EINVAL;

		*ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
		if (endptr == comma + 1 || *endptr != '\0')
			return -EINVAL;
	} else if (*endptr != '\0') {
		/*
		 * No ack fd: reject trailing garbage after the control fd
		 * number (e.g. "fd:5junk"), mirroring the check done on the
		 * ack fd above.
		 */
		return -EINVAL;
	}

	return 0;
}
|
|
|
|
|
2020-09-03 20:29:37 +08:00
|
|
|
/*
 * Close control descriptors, but only when this code opened them itself
 * (the fifo: case, signalled by *ctl_fd_close); fd:N descriptors provided
 * by the user are left alone.  Clears the flag so a second call is a no-op.
 */
void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
{
	if (!*ctl_fd_close)
		return;

	*ctl_fd_close = false;
	close(ctl_fd);
	if (ctl_fd_ack >= 0)
		close(ctl_fd_ack);
}
|
|
|
|
|
2020-07-17 15:01:33 +08:00
|
|
|
int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
|
|
|
|
{
|
|
|
|
if (fd == -1) {
|
|
|
|
pr_debug("Control descriptor is not initialized\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
|
|
|
|
fdarray_flag__nonfilterable);
|
|
|
|
if (evlist->ctl_fd.pos < 0) {
|
|
|
|
evlist->ctl_fd.pos = -1;
|
|
|
|
pr_err("Failed to add ctl fd entry: %m\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
evlist->ctl_fd.fd = fd;
|
|
|
|
evlist->ctl_fd.ack = ack;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool evlist__ctlfd_initialized(struct evlist *evlist)
|
|
|
|
{
|
|
|
|
return evlist->ctl_fd.pos >= 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int evlist__finalize_ctlfd(struct evlist *evlist)
|
|
|
|
{
|
|
|
|
struct pollfd *entries = evlist->core.pollfd.entries;
|
|
|
|
|
|
|
|
if (!evlist__ctlfd_initialized(evlist))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
entries[evlist->ctl_fd.pos].fd = -1;
|
|
|
|
entries[evlist->ctl_fd.pos].events = 0;
|
|
|
|
entries[evlist->ctl_fd.pos].revents = 0;
|
|
|
|
|
|
|
|
evlist->ctl_fd.pos = -1;
|
|
|
|
evlist->ctl_fd.ack = -1;
|
|
|
|
evlist->ctl_fd.fd = -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
|
|
|
|
char *cmd_data, size_t data_size)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
char c;
|
|
|
|
size_t bytes_read = 0;
|
|
|
|
|
2020-09-01 17:37:54 +08:00
|
|
|
*cmd = EVLIST_CTL_CMD_UNSUPPORTED;
|
2020-07-17 15:01:33 +08:00
|
|
|
memset(cmd_data, 0, data_size);
|
|
|
|
data_size--;
|
|
|
|
|
|
|
|
do {
|
|
|
|
err = read(evlist->ctl_fd.fd, &c, 1);
|
|
|
|
if (err > 0) {
|
|
|
|
if (c == '\n' || c == '\0')
|
|
|
|
break;
|
|
|
|
cmd_data[bytes_read++] = c;
|
|
|
|
if (bytes_read == data_size)
|
|
|
|
break;
|
2020-09-01 17:37:54 +08:00
|
|
|
continue;
|
|
|
|
} else if (err == -1) {
|
|
|
|
if (errno == EINTR)
|
|
|
|
continue;
|
|
|
|
if (errno == EAGAIN || errno == EWOULDBLOCK)
|
|
|
|
err = 0;
|
|
|
|
else
|
2020-07-17 15:01:33 +08:00
|
|
|
pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
|
|
|
|
}
|
2020-09-01 17:37:54 +08:00
|
|
|
break;
|
2020-07-17 15:01:33 +08:00
|
|
|
} while (1);
|
|
|
|
|
|
|
|
pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
|
|
|
|
bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");
|
|
|
|
|
2020-09-01 17:37:54 +08:00
|
|
|
if (bytes_read > 0) {
|
2020-07-17 15:01:33 +08:00
|
|
|
if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
|
|
|
|
(sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
|
|
|
|
*cmd = EVLIST_CTL_CMD_ENABLE;
|
|
|
|
} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
|
|
|
|
(sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
|
|
|
|
*cmd = EVLIST_CTL_CMD_DISABLE;
|
2020-09-01 17:37:57 +08:00
|
|
|
} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
|
|
|
|
(sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
|
|
|
|
*cmd = EVLIST_CTL_CMD_SNAPSHOT;
|
|
|
|
pr_debug("is snapshot\n");
|
2020-07-17 15:01:33 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-01 17:37:54 +08:00
|
|
|
return bytes_read ? (int)bytes_read : err;
|
2020-07-17 15:01:33 +08:00
|
|
|
}
|
|
|
|
|
2020-09-01 17:37:57 +08:00
|
|
|
int evlist__ctlfd_ack(struct evlist *evlist)
|
2020-07-17 15:01:33 +08:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (evlist->ctl_fd.ack == -1)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
|
|
|
|
sizeof(EVLIST_CTL_CMD_ACK_TAG));
|
|
|
|
if (err == -1)
|
|
|
|
pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
|
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
|
|
|
|
int ctlfd_pos = evlist->ctl_fd.pos;
|
|
|
|
struct pollfd *entries = evlist->core.pollfd.entries;
|
|
|
|
|
|
|
|
if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (entries[ctlfd_pos].revents & POLLIN) {
|
|
|
|
err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
|
|
|
|
EVLIST_CTL_CMD_MAX_LEN);
|
|
|
|
if (err > 0) {
|
|
|
|
switch (*cmd) {
|
|
|
|
case EVLIST_CTL_CMD_ENABLE:
|
|
|
|
evlist__enable(evlist);
|
|
|
|
break;
|
|
|
|
case EVLIST_CTL_CMD_DISABLE:
|
|
|
|
evlist__disable(evlist);
|
|
|
|
break;
|
2020-09-01 17:37:57 +08:00
|
|
|
case EVLIST_CTL_CMD_SNAPSHOT:
|
|
|
|
break;
|
2020-07-17 15:01:33 +08:00
|
|
|
case EVLIST_CTL_CMD_ACK:
|
|
|
|
case EVLIST_CTL_CMD_UNSUPPORTED:
|
|
|
|
default:
|
|
|
|
pr_debug("ctlfd: unsupported %d\n", *cmd);
|
|
|
|
break;
|
|
|
|
}
|
2020-09-01 17:37:57 +08:00
|
|
|
if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED ||
|
|
|
|
*cmd == EVLIST_CTL_CMD_SNAPSHOT))
|
2020-07-17 15:01:33 +08:00
|
|
|
evlist__ctlfd_ack(evlist);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
|
|
|
|
evlist__finalize_ctlfd(evlist);
|
|
|
|
else
|
|
|
|
entries[ctlfd_pos].revents = 0;
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
2020-09-24 20:44:53 +08:00
|
|
|
|
|
|
|
struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
|
|
|
|
{
|
|
|
|
struct evsel *evsel;
|
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
|
|
|
if (evsel->idx == idx)
|
|
|
|
return evsel;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|