2019-05-29 22:12:25 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2011-01-30 20:46:46 +08:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
|
|
|
|
*
|
|
|
|
* Parts came from builtin-{top,stat,record}.c, see those files for further
|
|
|
|
* copyright notes.
|
|
|
|
*/
|
2014-12-12 05:03:01 +08:00
|
|
|
#include <api/fs/fs.h>
|
2017-04-18 21:46:11 +08:00
|
|
|
#include <errno.h>
|
2017-04-18 02:23:08 +08:00
|
|
|
#include <inttypes.h>
|
2011-01-12 08:30:02 +08:00
|
|
|
#include <poll.h>
|
2011-01-30 20:46:46 +08:00
|
|
|
#include "cpumap.h"
|
2019-09-23 23:20:38 +08:00
|
|
|
#include "util/mmap.h"
|
2011-01-30 20:46:46 +08:00
|
|
|
#include "thread_map.h"
|
2012-04-26 13:15:22 +08:00
|
|
|
#include "target.h"
|
2011-01-12 06:56:53 +08:00
|
|
|
#include "evlist.h"
|
|
|
|
#include "evsel.h"
|
perf record: Allow multiple recording time ranges
AUX area traces can produce too much data to record successfully or
analyze subsequently. Add another means to reduce data collection by
allowing multiple recording time ranges.
This is useful, for instance, in cases where a workload produces
predictably reproducible events in specific time ranges.
Today we only have perf record -D <msecs> to start at a specific region, or
some complicated approach using snapshot mode and external scripts sending
signals or using the fifos. But these approaches are difficult to set up
compared with simply having perf do it.
Extend perf record option -D/--delay option to specifying relative time
stamps for start stop controlled by perf with the right time offset, for
instance:
perf record -e intel_pt// -D 10-20,30-40
to record 10ms to 20ms into the trace and 30ms to 40ms.
Example:
The example workload is:
$ cat repeat-usleep.c
int usleep(useconds_t usec);
int usage(int ret, const char *msg)
{
if (msg)
fprintf(stderr, "%s\n", msg);
fprintf(stderr, "Usage is: repeat-usleep <microseconds>\n");
return ret;
}
int main(int argc, char *argv[])
{
unsigned long usecs;
char *end_ptr;
if (argc != 2)
return usage(1, "Error: Wrong number of arguments!");
errno = 0;
usecs = strtoul(argv[1], &end_ptr, 0);
if (errno || *end_ptr || usecs > UINT_MAX)
return usage(1, "Error: Invalid argument!");
while (1) {
int ret = usleep(usecs);
if (ret & errno != EINTR)
return usage(1, "Error: usleep() failed!");
}
return 0;
}
$ perf record -e intel_pt//u --delay 10-20,40-70,110-160 -- ./repeat-usleep 500
Events disabled
Events enabled
Events disabled
Events enabled
Events disabled
Events enabled
Events disabled
[ perf record: Woken up 5 times to write data ]
[ perf record: Captured and wrote 0.204 MB perf.data ]
Terminated
A dlfilter is used to determine continuous data collection (timestamps
less than 1ms apart):
$ cat dlfilter-show-delays.c
static __u64 start_time;
static __u64 last_time;
int start(void **data, void *ctx)
{
printf("%-17s\t%-9s\t%-6s\n", " Time", " Duration", " Delay");
return 0;
}
int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
__u64 delta;
if (!sample->time)
return 1;
if (!last_time)
goto out;
delta = sample->time - last_time;
if (delta < 1000000)
goto out2;;
printf("%17.9f\t%9.1f\t%6.1f\n", start_time / 1000000000.0, (last_time - start_time) / 1000000.0, delta / 1000000.0);
out:
start_time = sample->time;
out2:
last_time = sample->time;
return 1;
}
int stop(void *data, void *ctx)
{
printf("%17.9f\t%9.1f\n", start_time / 1000000000.0, (last_time - start_time) / 1000000.0);
return 0;
}
The result shows the times roughly match the --delay option:
$ perf script --itrace=qb --dlfilter dlfilter-show-delays.so
Time Duration Delay
39215.302317300 9.7 20.5
39215.332480217 30.4 40.9
39215.403837717 49.8
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20220824072814.16422-6-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-08-24 15:28:14 +08:00
|
|
|
#include "record.h"
|
2013-08-14 20:48:24 +08:00
|
|
|
#include "debug.h"
|
2017-04-20 03:05:56 +08:00
|
|
|
#include "units.h"
|
2021-04-26 05:43:33 +08:00
|
|
|
#include "bpf_counter.h"
|
2019-08-06 21:25:25 +08:00
|
|
|
#include <internal/lib.h> // page_size
|
2019-11-21 08:15:17 +08:00
|
|
|
#include "affinity.h"
|
2019-08-30 02:20:59 +08:00
|
|
|
#include "../perf.h"
|
2016-07-14 16:34:42 +08:00
|
|
|
#include "asm/bug.h"
|
2019-03-12 13:30:50 +08:00
|
|
|
#include "bpf-event.h"
|
2022-10-28 04:44:07 +08:00
|
|
|
#include "util/event.h"
|
2019-10-15 07:10:50 +08:00
|
|
|
#include "util/string2.h"
|
2020-05-05 22:49:08 +08:00
|
|
|
#include "util/perf_api_probe.h"
|
perf tools: Add 'evlist' control command
Add a new 'evlist' control command to display all the evlist events.
When it is received, perf will scan and print current evlist into perf
record terminal.
The interface string for control file is:
evlist [-v|-g|-F]
The syntax follows perf evlist command:
-F Show just the sample frequency used for each event.
-v Show all fields.
-g Show event group information.
Example session:
terminal 1:
# mkfifo control ack
# perf record --control=fifo:control,ack -e '{cycles,instructions}'
terminal 2:
# echo evlist > control
terminal 1:
cycles
instructions
dummy:HG
terminal 2:
# echo 'evlist -v' > control
terminal 1:
cycles: size: 120, { sample_period, sample_freq }: 4000, sample_type: \
IP|TID|TIME|ID|CPU|PERIOD, read_format: ID, disabled: 1, inherit: 1, freq: 1, \
sample_id_all: 1, exclude_guest: 1
instructions: size: 120, config: 0x1, { sample_period, sample_freq }: 4000, \
sample_type: IP|TID|TIME|ID|CPU|PERIOD, read_format: ID, inherit: 1, freq: 1, \
sample_id_all: 1, exclude_guest: 1
dummy:HG: type: 1, size: 120, config: 0x9, { sample_period, sample_freq }: 4000, \
sample_type: IP|TID|TIME|ID|CPU|PERIOD, read_format: ID, inherit: 1, mmap: 1, \
comm: 1, freq: 1, task: 1, sample_id_all: 1, mmap2: 1, comm_exec: 1, ksymbol: 1, \
bpf_event: 1
terminal 2:
# echo 'evlist -g' > control
terminal 1:
{cycles,instructions}
dummy:HG
terminal 2:
# echo 'evlist -F' > control
terminal 1:
cycles: sample_freq=4000
instructions: sample_freq=4000
dummy:HG: sample_freq=4000
This new evlist command is handy to get real event names when
wildcards are used.
Adding evsel_fprintf.c object to python/perf.so build, because
it's now evlist.c dependency.
Adding PYTHON_PERF define for python/perf.so compilation, so we
can use it to compile in only evsel__fprintf from evsel_fprintf.c
object.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Budankov <abudankov@huawei.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lore.kernel.org/lkml/20201226232038.390883-3-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2020-12-27 07:20:36 +08:00
|
|
|
#include "util/evsel_fprintf.h"
|
2021-04-27 15:01:26 +08:00
|
|
|
#include "util/evlist-hybrid.h"
|
perf tools: Enable on a list of CPUs for hybrid
The 'perf record' and 'perf stat' commands have supported the option
'-C/--cpus' to count or collect only on the list of CPUs provided. This
option needs to be supported for hybrid as well.
For hybrid support, it needs to check that the cpu list are available
on hybrid PMU. One example for AlderLake, cpu0-7 is 'cpu_core', cpu8-11
is 'cpu_atom'.
Before:
# perf stat -e cpu_core/cycles/ -C11 -- sleep 1
Performance counter stats for 'CPU(s) 11':
<not supported> cpu_core/cycles/
1.006179431 seconds time elapsed
The 'perf stat' command silently returned "<not supported>" without any
helpful information. It should error out pointing out that that cpu11
was not 'cpu_core'.
After:
# perf stat -e cpu_core/cycles/ -C11 -- sleep 1
WARNING: 11 isn't a 'cpu_core', please use a CPU list in the 'cpu_core' range (0-7)
failed to use cpu list 11
We also need to support the events without pmu prefix specified.
# perf stat -e cycles -C11 -- sleep 1
WARNING: 11 isn't a 'cpu_core', please use a CPU list in the 'cpu_core' range (0-7)
Performance counter stats for 'CPU(s) 11':
1,067,373 cpu_atom/cycles/
1.005544738 seconds time elapsed
The perf tool creates two cycles events automatically, cpu_core/cycles/ and
cpu_atom/cycles/. It checks that cpu11 is not 'cpu_core', then shows a warning
for cpu_core/cycles/ and only count the cpu_atom/cycles/.
If part of cpus are 'cpu_core' and part of cpus are 'cpu_atom', for example,
# perf stat -e cycles -C0,11 -- sleep 1
WARNING: use 0 in 'cpu_core' for 'cycles', skip other cpus in list.
WARNING: use 11 in 'cpu_atom' for 'cycles', skip other cpus in list.
Performance counter stats for 'CPU(s) 0,11':
1,914,704 cpu_core/cycles/
2,036,983 cpu_atom/cycles/
1.005815641 seconds time elapsed
It now automatically selects cpu0 for cpu_core/cycles/, selects cpu11 for
cpu_atom/cycles/, and output with some warnings.
Some more complex examples,
# perf stat -e cycles,instructions -C0,11 -- sleep 1
WARNING: use 0 in 'cpu_core' for 'cycles', skip other cpus in list.
WARNING: use 11 in 'cpu_atom' for 'cycles', skip other cpus in list.
WARNING: use 0 in 'cpu_core' for 'instructions', skip other cpus in list.
WARNING: use 11 in 'cpu_atom' for 'instructions', skip other cpus in list.
Performance counter stats for 'CPU(s) 0,11':
2,780,387 cpu_core/cycles/
1,583,432 cpu_atom/cycles/
3,957,277 cpu_core/instructions/
1,167,089 cpu_atom/instructions/
1.006005124 seconds time elapsed
# perf stat -e cycles,cpu_atom/instructions/ -C0,11 -- sleep 1
WARNING: use 0 in 'cpu_core' for 'cycles', skip other cpus in list.
WARNING: use 11 in 'cpu_atom' for 'cycles', skip other cpus in list.
WARNING: use 11 in 'cpu_atom' for 'cpu_atom/instructions/', skip other cpus in list.
Performance counter stats for 'CPU(s) 0,11':
3,290,301 cpu_core/cycles/
1,953,073 cpu_atom/cycles/
1,407,869 cpu_atom/instructions/
1.006260912 seconds time elapsed
Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jin Yao <yao.jin@intel.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210723063433.7318-4-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2021-07-23 14:34:33 +08:00
|
|
|
#include "util/pmu.h"
|
2022-10-27 04:24:27 +08:00
|
|
|
#include "util/sample.h"
|
2017-04-20 02:49:18 +08:00
|
|
|
#include <signal.h>
|
2011-11-09 18:47:15 +08:00
|
|
|
#include <unistd.h>
|
perf evlist: Use unshare(CLONE_FS) in sb threads to let setns(CLONE_NEWNS) work
When we started using a thread to catch the PERF_RECORD_BPF_EVENT meta
data events to then ask the kernel for further info (BTF, etc) for BPF
programs shortly after they get loaded, we forgot to use
unshare(CLONE_FS) as was done in:
868a832918f6 ("perf top: Support lookup of symbols in other mount namespaces.")
Do it so that we can enter the namespaces to read the build-ids at the
end of a 'perf record' session for the DSOs that had hits.
Before:
Starting a 'stress-ng --cpus 8' inside a container and then, outside the
container running:
# perf record -a --namespaces sleep 5
# perf buildid-list | grep stress-ng
#
We would end up with a 'perf.data' file that had no entry in its
build-id table for the /usr/bin/stress-ng binary inside the container
that got tons of PERF_RECORD_SAMPLEs.
After:
# perf buildid-list | grep stress-ng
f2ed02c68341183a124b9b0f6e2e6c493c465b29 /usr/bin/stress-ng
#
Then its just a matter of making sure that that binary debuginfo package
gets available in a place that 'perf report' will look at build-id keyed
ELF files, which, in my case, on a f30 notebook, was a matter of
installing the debuginfo file for the distro used in the container,
fedora 31:
# rpm -ivh http://fedora.c3sl.ufpr.br/linux/development/31/Everything/x86_64/debug/tree/Packages/s/stress-ng-debuginfo-0.07.29-10.fc31.x86_64.rpm
Then, because perf currently looks for those debuginfo files (richer ELF
symtab) inside that namespace (look at the setns calls):
openat(AT_FDCWD, "/proc/self/ns/mnt", O_RDONLY) = 137
openat(AT_FDCWD, "/proc/13169/ns/mnt", O_RDONLY) = 139
setns(139, CLONE_NEWNS) = 0
stat("/usr/bin/stress-ng", {st_mode=S_IFREG|0755, st_size=3065416, ...}) = 0
openat(AT_FDCWD, "/usr/bin/stress-ng", O_RDONLY) = 140
fcntl(140, F_GETFD) = 0
fstat(140, {st_mode=S_IFREG|0755, st_size=3065416, ...}) = 0
mmap(NULL, 3065416, PROT_READ, MAP_PRIVATE, 140, 0) = 0x7ff2fdc5b000
munmap(0x7ff2fdc5b000, 3065416) = 0
close(140) = 0
stat("stress-ng-0.07.29-10.fc31.x86_64.debug", 0x7fff45d71260) = -1 ENOENT (No such file or directory)
stat("/usr/bin/stress-ng-0.07.29-10.fc31.x86_64.debug", 0x7fff45d71260) = -1 ENOENT (No such file or directory)
stat("/usr/bin/.debug/stress-ng-0.07.29-10.fc31.x86_64.debug", 0x7fff45d71260) = -1 ENOENT (No such file or directory)
stat("/usr/lib/debug/usr/bin/stress-ng-0.07.29-10.fc31.x86_64.debug", 0x7fff45d71260) = -1 ENOENT (No such file or directory)
stat("/root/.debug/.build-id/f2/ed02c68341183a124b9b0f6e2e6c493c465b29", 0x7fff45d711e0) = -1 ENOENT (No such file or directory)
To only then go back to the "host" namespace to look just in the users's
~/.debug cache:
setns(137, CLONE_NEWNS) = 0
chdir("/root") = 0
close(137) = 0
close(139) = 0
stat("/root/.debug/.build-id/f2/ed02c68341183a124b9b0f6e2e6c493c465b29/elf", 0x7fff45d732e0) = -1 ENOENT (No such file or directory)
It continues to fail to resolve symbols:
# perf report | grep stress-ng | head -5
9.50% stress-ng-cpu stress-ng [.] 0x0000000000021ac1
8.58% stress-ng-cpu stress-ng [.] 0x0000000000021ab4
8.51% stress-ng-cpu stress-ng [.] 0x0000000000021489
7.17% stress-ng-cpu stress-ng [.] 0x00000000000219b6
3.93% stress-ng-cpu stress-ng [.] 0x0000000000021478
#
To overcome that we use:
# perf buildid-cache -v --add /usr/lib/debug/usr/bin/stress-ng-0.07.29-10.fc31.x86_64.debug
Adding f2ed02c68341183a124b9b0f6e2e6c493c465b29 /usr/lib/debug/usr/bin/stress-ng-0.07.29-10.fc31.x86_64.debug: Ok
#
# ls -la /root/.debug/.build-id/f2/ed02c68341183a124b9b0f6e2e6c493c465b29/elf
-rw-r--r--. 3 root root 2401184 Jul 27 07:03 /root/.debug/.build-id/f2/ed02c68341183a124b9b0f6e2e6c493c465b29/elf
# file /root/.debug/.build-id/f2/ed02c68341183a124b9b0f6e2e6c493c465b29/elf
/root/.debug/.build-id/f2/ed02c68341183a124b9b0f6e2e6c493c465b29/elf: ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, interpreter \004, BuildID[sha1]=f2ed02c68341183a124b9b0f6e2e6c493c465b29, for GNU/Linux 3.2.0, with debug_info, not stripped, too many notes (256)
#
Now it finally works:
# perf report | grep stress-ng | head -5
23.59% stress-ng-cpu stress-ng [.] ackermann
23.33% stress-ng-cpu stress-ng [.] is_prime
17.36% stress-ng-cpu stress-ng [.] stress_cpu_sieve
6.08% stress-ng-cpu stress-ng [.] stress_cpu_correlate
3.55% stress-ng-cpu stress-ng [.] queens_try
#
I'll make sure that it looks for the build-id keyed files in both the
"host" namespace (the namespace the user running 'perf record' was a the
time of the recording) and in the container namespace, as it shouldn't
matter where a content based key lookup finds the ELF file to use in
resolving symbols, etc.
Reported-by: Karl Rister <krister@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Krister Johansen <kjlx@templeofstupid.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Stanislav Fomichev <sdf@google.com>
Cc: Thomas-Mich Richter <tmricht@linux.vnet.ibm.com>
Fixes: 657ee5531903 ("perf evlist: Introduce side band thread")
Link: https://lkml.kernel.org/n/tip-g79k0jz41adiaeuqud742t2l@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-08-29 03:48:50 +08:00
|
|
|
#include <sched.h>
|
2019-08-31 01:45:20 +08:00
|
|
|
#include <stdlib.h>
|
2011-01-12 06:56:53 +08:00
|
|
|
|
2011-11-04 19:10:59 +08:00
|
|
|
#include "parse-events.h"
|
2015-12-15 23:39:39 +08:00
|
|
|
#include <subcmd/parse-options.h>
|
2011-11-04 19:10:59 +08:00
|
|
|
|
2018-01-23 03:42:16 +08:00
|
|
|
#include <fcntl.h>
|
2017-04-20 06:03:14 +08:00
|
|
|
#include <sys/ioctl.h>
|
2011-01-30 20:46:46 +08:00
|
|
|
#include <sys/mman.h>
|
2021-03-15 21:13:22 +08:00
|
|
|
#include <sys/prctl.h>
|
perf record: Allow multiple recording time ranges
AUX area traces can produce too much data to record successfully or
analyze subsequently. Add another means to reduce data collection by
allowing multiple recording time ranges.
This is useful, for instance, in cases where a workload produces
predictably reproducible events in specific time ranges.
Today we only have perf record -D <msecs> to start at a specific region, or
some complicated approach using snapshot mode and external scripts sending
signals or using the fifos. But these approaches are difficult to set up
compared with simply having perf do it.
Extend perf record option -D/--delay option to specifying relative time
stamps for start stop controlled by perf with the right time offset, for
instance:
perf record -e intel_pt// -D 10-20,30-40
to record 10ms to 20ms into the trace and 30ms to 40ms.
Example:
The example workload is:
$ cat repeat-usleep.c
int usleep(useconds_t usec);
int usage(int ret, const char *msg)
{
if (msg)
fprintf(stderr, "%s\n", msg);
fprintf(stderr, "Usage is: repeat-usleep <microseconds>\n");
return ret;
}
int main(int argc, char *argv[])
{
unsigned long usecs;
char *end_ptr;
if (argc != 2)
return usage(1, "Error: Wrong number of arguments!");
errno = 0;
usecs = strtoul(argv[1], &end_ptr, 0);
if (errno || *end_ptr || usecs > UINT_MAX)
return usage(1, "Error: Invalid argument!");
while (1) {
int ret = usleep(usecs);
if (ret & errno != EINTR)
return usage(1, "Error: usleep() failed!");
}
return 0;
}
$ perf record -e intel_pt//u --delay 10-20,40-70,110-160 -- ./repeat-usleep 500
Events disabled
Events enabled
Events disabled
Events enabled
Events disabled
Events enabled
Events disabled
[ perf record: Woken up 5 times to write data ]
[ perf record: Captured and wrote 0.204 MB perf.data ]
Terminated
A dlfilter is used to determine continuous data collection (timestamps
less than 1ms apart):
$ cat dlfilter-show-delays.c
static __u64 start_time;
static __u64 last_time;
int start(void **data, void *ctx)
{
printf("%-17s\t%-9s\t%-6s\n", " Time", " Duration", " Delay");
return 0;
}
int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
__u64 delta;
if (!sample->time)
return 1;
if (!last_time)
goto out;
delta = sample->time - last_time;
if (delta < 1000000)
goto out2;;
printf("%17.9f\t%9.1f\t%6.1f\n", start_time / 1000000000.0, (last_time - start_time) / 1000000.0, delta / 1000000.0);
out:
start_time = sample->time;
out2:
last_time = sample->time;
return 1;
}
int stop(void *data, void *ctx)
{
printf("%17.9f\t%9.1f\n", start_time / 1000000000.0, (last_time - start_time) / 1000000.0);
return 0;
}
The result shows the times roughly match the --delay option:
$ perf script --itrace=qb --dlfilter dlfilter-show-delays.so
Time Duration Delay
39215.302317300 9.7 20.5
39215.332480217 30.4 40.9
39215.403837717 49.8
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20220824072814.16422-6-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-08-24 15:28:14 +08:00
|
|
|
#include <sys/timerfd.h>
|
2011-01-30 20:46:46 +08:00
|
|
|
|
2011-01-13 08:39:13 +08:00
|
|
|
#include <linux/bitops.h>
|
|
|
|
#include <linux/hash.h>
|
2014-12-16 03:04:11 +08:00
|
|
|
#include <linux/log2.h>
|
2015-09-07 16:38:06 +08:00
|
|
|
#include <linux/err.h>
|
2019-08-30 03:18:59 +08:00
|
|
|
#include <linux/string.h>
|
perf record: Allow multiple recording time ranges
AUX area traces can produce too much data to record successfully or
analyze subsequently. Add another means to reduce data collection by
allowing multiple recording time ranges.
This is useful, for instance, in cases where a workload produces
predictably reproducible events in specific time ranges.
Today we only have perf record -D <msecs> to start at a specific region, or
some complicated approach using snapshot mode and external scripts sending
signals or using the fifos. But these approaches are difficult to set up
compared with simply having perf do it.
Extend perf record option -D/--delay option to specifying relative time
stamps for start stop controlled by perf with the right time offset, for
instance:
perf record -e intel_pt// -D 10-20,30-40
to record 10ms to 20ms into the trace and 30ms to 40ms.
Example:
The example workload is:
$ cat repeat-usleep.c
int usleep(useconds_t usec);
int usage(int ret, const char *msg)
{
if (msg)
fprintf(stderr, "%s\n", msg);
fprintf(stderr, "Usage is: repeat-usleep <microseconds>\n");
return ret;
}
int main(int argc, char *argv[])
{
unsigned long usecs;
char *end_ptr;
if (argc != 2)
return usage(1, "Error: Wrong number of arguments!");
errno = 0;
usecs = strtoul(argv[1], &end_ptr, 0);
if (errno || *end_ptr || usecs > UINT_MAX)
return usage(1, "Error: Invalid argument!");
while (1) {
int ret = usleep(usecs);
if (ret & errno != EINTR)
return usage(1, "Error: usleep() failed!");
}
return 0;
}
$ perf record -e intel_pt//u --delay 10-20,40-70,110-160 -- ./repeat-usleep 500
Events disabled
Events enabled
Events disabled
Events enabled
Events disabled
Events enabled
Events disabled
[ perf record: Woken up 5 times to write data ]
[ perf record: Captured and wrote 0.204 MB perf.data ]
Terminated
A dlfilter is used to determine continuous data collection (timestamps
less than 1ms apart):
$ cat dlfilter-show-delays.c
static __u64 start_time;
static __u64 last_time;
int start(void **data, void *ctx)
{
printf("%-17s\t%-9s\t%-6s\n", " Time", " Duration", " Delay");
return 0;
}
int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
__u64 delta;
if (!sample->time)
return 1;
if (!last_time)
goto out;
delta = sample->time - last_time;
if (delta < 1000000)
goto out2;;
printf("%17.9f\t%9.1f\t%6.1f\n", start_time / 1000000000.0, (last_time - start_time) / 1000000.0, delta / 1000000.0);
out:
start_time = sample->time;
out2:
last_time = sample->time;
return 1;
}
int stop(void *data, void *ctx)
{
printf("%17.9f\t%9.1f\n", start_time / 1000000000.0, (last_time - start_time) / 1000000.0);
return 0;
}
The result shows the times roughly match the --delay option:
$ perf script --itrace=qb --dlfilter dlfilter-show-delays.so
Time Duration Delay
39215.302317300 9.7 20.5
39215.332480217 30.4 40.9
39215.403837717 49.8
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20220824072814.16422-6-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-08-24 15:28:14 +08:00
|
|
|
#include <linux/time64.h>
|
2019-07-04 22:32:27 +08:00
|
|
|
#include <linux/zalloc.h>
|
libperf: Add perf_evlist__init() function
Add the perf_evlist__init() function to initialize a perf_evlist struct.
Committer testing:
Fix a change in init ordering that was causing this backtrace:
(gdb) run stat sleep 1
Starting program: /root/bin/perf stat sleep 1
Program received signal SIGSEGV, Segmentation fault.
0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
161 if (!evsel->own_cpus || evlist->has_user_cpus) {
Missing separate debuginfos, use: dnf debuginfo-install bzip2-libs-1.0.6-29.fc30.x86_64 elfutils-libelf-0.176-3.fc30.x86_64 elfutils-libs-0.176-3.fc30.x86_64 glib2-2.60.4-1.fc30.x86_64 libbabeltrace-1.5.6-2.fc30.x86_64 libgcc-9.1.1-1.fc30.x86_64 libunwind-1.3.1-2.fc30.x86_64 libuuid-2.33.2-1.fc30.x86_64 libxcrypt-4.4.6-2.fc30.x86_64 libzstd-1.4.0-1.fc30.x86_64 numactl-libs-2.0.12-2.fc30.x86_64 pcre-8.43-2.fc30.x86_64 perl-libs-5.28.2-436.fc30.x86_64 popt-1.16-17.fc30.x86_64 python2-libs-2.7.16-2.fc30.x86_64 slang-2.3.2-5.fc30.x86_64 xz-libs-5.2.4-5.fc30.x86_64 zlib-1.2.11-15.fc30.x86_64
(gdb) bt
#0 0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
#1 0x00000000004f6c7a in perf_evlist__propagate_maps (evlist=0xbb34c0) at util/evlist.c:178
#2 0x00000000004f955e in perf_evlist__set_maps (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:1128
#3 0x00000000004f66f8 in evlist__init (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:52
#4 0x00000000004f6790 in evlist__new () at util/evlist.c:64
#5 0x0000000000456071 in cmd_stat (argc=3, argv=0x7fffffffd670) at builtin-stat.c:1705
#6 0x00000000004dd0fa in run_builtin (p=0xa21e00 <commands+288>, argc=3, argv=0x7fffffffd670) at perf.c:304
#7 0x00000000004dd367 in handle_internal_command (argc=3, argv=0x7fffffffd670) at perf.c:356
#8 0x00000000004dd4ae in run_argv (argcp=0x7fffffffd4cc, argv=0x7fffffffd4c0) at perf.c:400
#9 0x00000000004dd81a in main (argc=3, argv=0x7fffffffd670) at perf.c:522
(gdb) bt
So move the initialization of the core evlist (calling
perf_evlist__init()) to before perf_evlist__set_maps() in
evlist__init().
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190721112506.12306-39-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-07-21 19:24:25 +08:00
|
|
|
#include <perf/evlist.h>
|
2019-07-21 19:24:50 +08:00
|
|
|
#include <perf/evsel.h>
|
2019-07-21 19:24:30 +08:00
|
|
|
#include <perf/cpumap.h>
|
2019-10-07 20:53:17 +08:00
|
|
|
#include <perf/mmap.h>
|
2011-01-13 08:39:13 +08:00
|
|
|
|
2019-08-22 00:58:12 +08:00
|
|
|
#include <internal/xyarray.h>
|
|
|
|
|
2018-12-12 02:48:47 +08:00
|
|
|
#ifdef LACKS_SIGQUEUE_PROTOTYPE
|
|
|
|
int sigqueue(pid_t pid, int sig, const union sigval value);
|
|
|
|
#endif
|
|
|
|
|
2019-07-21 19:24:45 +08:00
|
|
|
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
|
2019-09-03 04:04:12 +08:00
|
|
|
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
|
2011-01-30 20:46:46 +08:00
|
|
|
|
2019-07-21 19:23:54 +08:00
|
|
|
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
|
|
|
|
struct perf_thread_map *threads)
|
2011-01-19 07:41:45 +08:00
|
|
|
{
|
libperf: Add perf_evlist__init() function
Add the perf_evlist__init() function to initialize a perf_evlist struct.
Committer testing:
Fix a change in init ordering that was causing this backtrace:
(gdb) run stat sleep 1
Starting program: /root/bin/perf stat sleep 1
Program received signal SIGSEGV, Segmentation fault.
0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
161 if (!evsel->own_cpus || evlist->has_user_cpus) {
Missing separate debuginfos, use: dnf debuginfo-install bzip2-libs-1.0.6-29.fc30.x86_64 elfutils-libelf-0.176-3.fc30.x86_64 elfutils-libs-0.176-3.fc30.x86_64 glib2-2.60.4-1.fc30.x86_64 libbabeltrace-1.5.6-2.fc30.x86_64 libgcc-9.1.1-1.fc30.x86_64 libunwind-1.3.1-2.fc30.x86_64 libuuid-2.33.2-1.fc30.x86_64 libxcrypt-4.4.6-2.fc30.x86_64 libzstd-1.4.0-1.fc30.x86_64 numactl-libs-2.0.12-2.fc30.x86_64 pcre-8.43-2.fc30.x86_64 perl-libs-5.28.2-436.fc30.x86_64 popt-1.16-17.fc30.x86_64 python2-libs-2.7.16-2.fc30.x86_64 slang-2.3.2-5.fc30.x86_64 xz-libs-5.2.4-5.fc30.x86_64 zlib-1.2.11-15.fc30.x86_64
(gdb) bt
#0 0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
#1 0x00000000004f6c7a in perf_evlist__propagate_maps (evlist=0xbb34c0) at util/evlist.c:178
#2 0x00000000004f955e in perf_evlist__set_maps (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:1128
#3 0x00000000004f66f8 in evlist__init (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:52
#4 0x00000000004f6790 in evlist__new () at util/evlist.c:64
#5 0x0000000000456071 in cmd_stat (argc=3, argv=0x7fffffffd670) at builtin-stat.c:1705
#6 0x00000000004dd0fa in run_builtin (p=0xa21e00 <commands+288>, argc=3, argv=0x7fffffffd670) at perf.c:304
#7 0x00000000004dd367 in handle_internal_command (argc=3, argv=0x7fffffffd670) at perf.c:356
#8 0x00000000004dd4ae in run_argv (argcp=0x7fffffffd4cc, argv=0x7fffffffd4c0) at perf.c:400
#9 0x00000000004dd81a in main (argc=3, argv=0x7fffffffd670) at perf.c:522
(gdb) bt
So move the initialization of the core evlist (calling
perf_evlist__init()) to before perf_evlist__set_maps() in
evlist__init().
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190721112506.12306-39-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-07-21 19:24:25 +08:00
|
|
|
perf_evlist__init(&evlist->core);
|
2019-07-21 19:24:43 +08:00
|
|
|
perf_evlist__set_maps(&evlist->core, cpus, threads);
|
2011-11-09 18:47:15 +08:00
|
|
|
evlist->workload.pid = -1;
|
2016-07-14 16:34:42 +08:00
|
|
|
evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
|
2020-07-17 15:00:47 +08:00
|
|
|
evlist->ctl_fd.fd = -1;
|
|
|
|
evlist->ctl_fd.ack = -1;
|
|
|
|
evlist->ctl_fd.pos = -1;
|
2011-01-19 07:41:45 +08:00
|
|
|
}
|
|
|
|
|
2019-07-21 19:23:55 +08:00
|
|
|
struct evlist *evlist__new(void)
|
2011-01-12 06:56:53 +08:00
|
|
|
{
|
2019-07-21 19:23:52 +08:00
|
|
|
struct evlist *evlist = zalloc(sizeof(*evlist));
|
2011-01-12 06:56:53 +08:00
|
|
|
|
2011-01-19 07:41:45 +08:00
|
|
|
if (evlist != NULL)
|
2019-07-21 19:23:54 +08:00
|
|
|
evlist__init(evlist, NULL, NULL);
|
2011-01-12 06:56:53 +08:00
|
|
|
|
|
|
|
return evlist;
|
|
|
|
}
|
|
|
|
|
2020-12-01 02:04:05 +08:00
|
|
|
struct evlist *evlist__new_default(void)
|
2013-09-01 18:36:14 +08:00
|
|
|
{
|
2019-07-21 19:23:55 +08:00
|
|
|
struct evlist *evlist = evlist__new();
|
2013-09-01 18:36:14 +08:00
|
|
|
|
2020-06-17 20:16:20 +08:00
|
|
|
if (evlist && evlist__add_default(evlist)) {
|
2019-07-21 19:23:56 +08:00
|
|
|
evlist__delete(evlist);
|
2013-09-01 18:36:14 +08:00
|
|
|
evlist = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return evlist;
|
|
|
|
}
|
|
|
|
|
2020-12-01 02:04:05 +08:00
|
|
|
struct evlist *evlist__new_dummy(void)
|
2016-01-08 00:14:56 +08:00
|
|
|
{
|
2019-07-21 19:23:55 +08:00
|
|
|
struct evlist *evlist = evlist__new();
|
2016-01-08 00:14:56 +08:00
|
|
|
|
2020-06-17 20:16:20 +08:00
|
|
|
if (evlist && evlist__add_dummy(evlist)) {
|
2019-07-21 19:23:56 +08:00
|
|
|
evlist__delete(evlist);
|
2016-01-08 00:14:56 +08:00
|
|
|
evlist = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return evlist;
|
|
|
|
}
|
|
|
|
|
2013-08-27 16:23:09 +08:00
|
|
|
/**
|
2020-12-01 01:44:40 +08:00
|
|
|
* evlist__set_id_pos - set the positions of event ids.
|
2013-08-27 16:23:09 +08:00
|
|
|
* @evlist: selected event list
|
|
|
|
*
|
|
|
|
* Events with compatible sample types all have the same id_pos
|
|
|
|
* and is_pos. For convenience, put a copy on evlist.
|
|
|
|
*/
|
2020-12-01 01:44:40 +08:00
|
|
|
void evlist__set_id_pos(struct evlist *evlist)
|
2013-08-27 16:23:09 +08:00
|
|
|
{
|
2019-09-03 16:39:52 +08:00
|
|
|
struct evsel *first = evlist__first(evlist);
|
2013-08-27 16:23:09 +08:00
|
|
|
|
|
|
|
evlist->id_pos = first->id_pos;
|
|
|
|
evlist->is_pos = first->is_pos;
|
|
|
|
}
|
|
|
|
|
2020-12-01 01:44:40 +08:00
|
|
|
static void evlist__update_id_pos(struct evlist *evlist)
|
2013-09-07 03:40:11 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2013-09-07 03:40:11 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel)
|
2020-04-30 02:58:40 +08:00
|
|
|
evsel__calc_id_pos(evsel);
|
2013-09-07 03:40:11 +08:00
|
|
|
|
2020-12-01 01:44:40 +08:00
|
|
|
evlist__set_id_pos(evlist);
|
2013-09-07 03:40:11 +08:00
|
|
|
}
|
|
|
|
|
2019-09-05 16:11:37 +08:00
|
|
|
static void evlist__purge(struct evlist *evlist)
|
2011-01-12 06:56:53 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *pos, *n;
|
2011-01-12 06:56:53 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry_safe(evlist, n, pos) {
|
2019-07-21 19:24:22 +08:00
|
|
|
list_del_init(&pos->core.node);
|
2015-08-27 20:07:40 +08:00
|
|
|
pos->evlist = NULL;
|
2019-07-21 19:23:57 +08:00
|
|
|
evsel__delete(pos);
|
2011-01-12 06:56:53 +08:00
|
|
|
}
|
|
|
|
|
2019-07-21 19:24:28 +08:00
|
|
|
evlist->core.nr_entries = 0;
|
2011-01-12 06:56:53 +08:00
|
|
|
}
|
|
|
|
|
2019-09-02 20:34:52 +08:00
|
|
|
void evlist__exit(struct evlist *evlist)
|
2011-01-12 06:56:53 +08:00
|
|
|
{
|
perf record: Allow multiple recording time ranges
AUX area traces can produce too much data to record successfully or
analyze subsequently. Add another means to reduce data collection by
allowing multiple recording time ranges.
This is useful, for instance, in cases where a workload produces
predictably reproducible events in specific time ranges.
Today we only have perf record -D <msecs> to start at a specific region, or
some complicated approach using snapshot mode and external scripts sending
signals or using the fifos. But these approaches are difficult to set up
compared with simply having perf do it.
Extend perf record option -D/--delay option to specifying relative time
stamps for start stop controlled by perf with the right time offset, for
instance:
perf record -e intel_pt// -D 10-20,30-40
to record 10ms to 20ms into the trace and 30ms to 40ms.
Example:
The example workload is:
$ cat repeat-usleep.c
int usleep(useconds_t usec);
int usage(int ret, const char *msg)
{
if (msg)
fprintf(stderr, "%s\n", msg);
fprintf(stderr, "Usage is: repeat-usleep <microseconds>\n");
return ret;
}
int main(int argc, char *argv[])
{
unsigned long usecs;
char *end_ptr;
if (argc != 2)
return usage(1, "Error: Wrong number of arguments!");
errno = 0;
usecs = strtoul(argv[1], &end_ptr, 0);
if (errno || *end_ptr || usecs > UINT_MAX)
return usage(1, "Error: Invalid argument!");
while (1) {
int ret = usleep(usecs);
if (ret & errno != EINTR)
return usage(1, "Error: usleep() failed!");
}
return 0;
}
$ perf record -e intel_pt//u --delay 10-20,40-70,110-160 -- ./repeat-usleep 500
Events disabled
Events enabled
Events disabled
Events enabled
Events disabled
Events enabled
Events disabled
[ perf record: Woken up 5 times to write data ]
[ perf record: Captured and wrote 0.204 MB perf.data ]
Terminated
A dlfilter is used to determine continuous data collection (timestamps
less than 1ms apart):
$ cat dlfilter-show-delays.c
static __u64 start_time;
static __u64 last_time;
int start(void **data, void *ctx)
{
printf("%-17s\t%-9s\t%-6s\n", " Time", " Duration", " Delay");
return 0;
}
int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
__u64 delta;
if (!sample->time)
return 1;
if (!last_time)
goto out;
delta = sample->time - last_time;
if (delta < 1000000)
goto out2;;
printf("%17.9f\t%9.1f\t%6.1f\n", start_time / 1000000000.0, (last_time - start_time) / 1000000.0, delta / 1000000.0);
out:
start_time = sample->time;
out2:
last_time = sample->time;
return 1;
}
int stop(void *data, void *ctx)
{
printf("%17.9f\t%9.1f\n", start_time / 1000000000.0, (last_time - start_time) / 1000000.0);
return 0;
}
The result shows the times roughly match the --delay option:
$ perf script --itrace=qb --dlfilter dlfilter-show-delays.so
Time Duration Delay
39215.302317300 9.7 20.5
39215.332480217 30.4 40.9
39215.403837717 49.8
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20220824072814.16422-6-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-08-24 15:28:14 +08:00
|
|
|
event_enable_timer__exit(&evlist->eet);
|
2013-12-27 04:41:15 +08:00
|
|
|
zfree(&evlist->mmap);
|
2017-12-05 00:51:07 +08:00
|
|
|
zfree(&evlist->overwrite_mmap);
|
2019-10-07 20:53:32 +08:00
|
|
|
perf_evlist__exit(&evlist->core);
|
2011-01-19 07:41:45 +08:00
|
|
|
}
|
|
|
|
|
2019-07-21 19:23:56 +08:00
|
|
|
void evlist__delete(struct evlist *evlist)
|
2011-01-19 07:41:45 +08:00
|
|
|
{
|
2016-06-22 05:15:45 +08:00
|
|
|
if (evlist == NULL)
|
|
|
|
return;
|
|
|
|
|
2019-08-16 22:19:55 +08:00
|
|
|
evlist__munmap(evlist);
|
2019-07-21 19:24:07 +08:00
|
|
|
evlist__close(evlist);
|
2019-09-05 16:11:37 +08:00
|
|
|
evlist__purge(evlist);
|
2019-09-02 20:34:52 +08:00
|
|
|
evlist__exit(evlist);
|
2011-01-12 06:56:53 +08:00
|
|
|
free(evlist);
|
|
|
|
}
|
|
|
|
|
2019-07-21 19:23:59 +08:00
|
|
|
void evlist__add(struct evlist *evlist, struct evsel *entry)
|
2011-01-12 06:56:53 +08:00
|
|
|
{
|
2019-07-21 19:24:28 +08:00
|
|
|
perf_evlist__add(&evlist->core, &entry->core);
|
2021-07-06 23:16:59 +08:00
|
|
|
entry->evlist = evlist;
|
|
|
|
entry->tracking = !entry->core.idx;
|
2019-07-21 19:24:28 +08:00
|
|
|
|
|
|
|
if (evlist->core.nr_entries == 1)
|
2020-12-01 01:44:40 +08:00
|
|
|
evlist__set_id_pos(evlist);
|
2011-01-12 06:56:53 +08:00
|
|
|
}
|
|
|
|
|
2019-07-21 19:24:00 +08:00
|
|
|
void evlist__remove(struct evlist *evlist, struct evsel *evsel)
|
2015-09-25 21:15:53 +08:00
|
|
|
{
|
|
|
|
evsel->evlist = NULL;
|
2019-07-21 19:24:27 +08:00
|
|
|
perf_evlist__remove(&evlist->core, &evsel->core);
|
2015-09-25 21:15:53 +08:00
|
|
|
}
|
|
|
|
|
2020-12-01 01:52:44 +08:00
|
|
|
void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list)
|
2011-11-04 19:10:59 +08:00
|
|
|
{
|
2020-12-04 19:10:11 +08:00
|
|
|
while (!list_empty(list)) {
|
|
|
|
struct evsel *evsel, *temp, *leader = NULL;
|
|
|
|
|
|
|
|
__evlist__for_each_entry_safe(list, temp, evsel) {
|
|
|
|
list_del_init(&evsel->core.node);
|
|
|
|
evlist__add(evlist, evsel);
|
|
|
|
leader = evsel;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
__evlist__for_each_entry_safe(list, temp, evsel) {
|
2021-07-06 23:17:00 +08:00
|
|
|
if (evsel__has_leader(evsel, leader)) {
|
2020-12-04 19:10:11 +08:00
|
|
|
list_del_init(&evsel->core.node);
|
|
|
|
evlist__add(evlist, evsel);
|
|
|
|
}
|
|
|
|
}
|
2015-09-08 15:58:53 +08:00
|
|
|
}
|
2011-11-04 19:10:59 +08:00
|
|
|
}
|
|
|
|
|
2019-10-01 22:14:26 +08:00
|
|
|
int __evlist__set_tracepoints_handlers(struct evlist *evlist,
|
|
|
|
const struct evsel_str_handler *assocs, size_t nr_assocs)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
for (i = 0; i < nr_assocs; i++) {
|
|
|
|
// Adding a handler for an event not in this evlist, just ignore it.
|
2020-11-30 20:48:07 +08:00
|
|
|
struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name);
|
2019-10-01 22:14:26 +08:00
|
|
|
if (evsel == NULL)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
err = -EEXIST;
|
|
|
|
if (evsel->handler != NULL)
|
|
|
|
goto out;
|
|
|
|
evsel->handler = assocs[i].handler;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = 0;
|
|
|
|
out:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-11-30 20:22:07 +08:00
|
|
|
void evlist__set_leader(struct evlist *evlist)
|
perf tools: Enable grouping logic for parsed events
This patch adds a functionality that allows to create event groups
based on the way they are specified on the command line. Adding
functionality to the '{}' group syntax introduced in earlier patch.
The current '--group/-g' option behaviour remains intact. If you
specify it for record/stat/top command, all the specified events
become members of a single group with the first event as a group
leader.
With the new '{}' group syntax you can create group like:
# perf record -e '{cycles,faults}' ls
resulting in single event group containing 'cycles' and 'faults'
events, with cycles event as group leader.
All groups are created with regards to threads and cpus. Thus
recording an event group within a 2 threads on server with
4 CPUs will create 8 separate groups.
Examples (first event in brackets is group leader):
# 1 group (cpu-clock,task-clock)
perf record --group -e cpu-clock,task-clock ls
perf record -e '{cpu-clock,task-clock}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock},{minor-faults,major-faults}' ls
# 1 group (cpu-clock,task-clock,minor-faults,major-faults)
perf record --group -e cpu-clock,task-clock -e minor-faults,major-faults ls
perf record -e '{cpu-clock,task-clock,minor-faults,major-faults}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock} -e '{minor-faults,major-faults}' \
-e instructions ls
# 1 group
# (cpu-clock,task-clock,minor-faults,major-faults,instructions)
perf record --group -e cpu-clock,task-clock \
-e minor-faults,major-faults -e instructions ls perf record -e
'{cpu-clock,task-clock,minor-faults,major-faults,instructions}' ls
It's possible to use standard event modifier for a group, which spans
over all events in the group and updates each event modifier settings,
for example:
# perf record -r '{faults:k,cache-references}:p'
resulting in ':kp' modifier being used for 'faults' and ':p' modifier
being used for 'cache-references' event.
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ulrich Drepper <drepper@gmail.com>
Link: http://lkml.kernel.org/n/tip-ho42u0wcr8mn1otkalqi13qp@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2012-08-08 18:22:36 +08:00
|
|
|
{
|
2021-07-06 23:17:02 +08:00
|
|
|
perf_evlist__set_leader(&evlist->core);
|
perf tools: Enable grouping logic for parsed events
This patch adds a functionality that allows to create event groups
based on the way they are specified on the command line. Adding
functionality to the '{}' group syntax introduced in earlier patch.
The current '--group/-g' option behaviour remains intact. If you
specify it for record/stat/top command, all the specified events
become members of a single group with the first event as a group
leader.
With the new '{}' group syntax you can create group like:
# perf record -e '{cycles,faults}' ls
resulting in single event group containing 'cycles' and 'faults'
events, with cycles event as group leader.
All groups are created with regards to threads and cpus. Thus
recording an event group within a 2 threads on server with
4 CPUs will create 8 separate groups.
Examples (first event in brackets is group leader):
# 1 group (cpu-clock,task-clock)
perf record --group -e cpu-clock,task-clock ls
perf record -e '{cpu-clock,task-clock}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock},{minor-faults,major-faults}' ls
# 1 group (cpu-clock,task-clock,minor-faults,major-faults)
perf record --group -e cpu-clock,task-clock -e minor-faults,major-faults ls
perf record -e '{cpu-clock,task-clock,minor-faults,major-faults}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock} -e '{minor-faults,major-faults}' \
-e instructions ls
# 1 group
# (cpu-clock,task-clock,minor-faults,major-faults,instructions)
perf record --group -e cpu-clock,task-clock \
-e minor-faults,major-faults -e instructions ls perf record -e
'{cpu-clock,task-clock,minor-faults,major-faults,instructions}' ls
It's possible to use standard event modifier for a group, which spans
over all events in the group and updates each event modifier settings,
for example:
# perf record -r '{faults:k,cache-references}:p'
resulting in ':kp' modifier being used for 'faults' and ':p' modifier
being used for 'cache-references' event.
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ulrich Drepper <drepper@gmail.com>
Link: http://lkml.kernel.org/n/tip-ho42u0wcr8mn1otkalqi13qp@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2012-08-08 18:22:36 +08:00
|
|
|
}
|
|
|
|
|
2020-06-17 20:16:20 +08:00
|
|
|
/*
 * Add the default event (hardware CPU cycles, optionally with
 * precise IP) to @evlist.  Returns 0 on success, -ENOMEM if the
 * evsel could not be created.
 */
int __evlist__add_default(struct evlist *evlist, bool precise)
{
	struct evsel *evsel;

	evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
				  PERF_COUNT_HW_CPU_CYCLES);
	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}
|
2011-01-12 08:30:02 +08:00
|
|
|
|
2022-05-24 15:54:25 +08:00
|
|
|
static struct evsel *evlist__dummy_event(struct evlist *evlist)
|
2016-01-08 00:14:56 +08:00
|
|
|
{
|
|
|
|
struct perf_event_attr attr = {
|
|
|
|
.type = PERF_TYPE_SOFTWARE,
|
|
|
|
.config = PERF_COUNT_SW_DUMMY,
|
|
|
|
.size = sizeof(attr), /* to capture ABI version */
|
|
|
|
};
|
2022-05-24 15:54:25 +08:00
|
|
|
|
|
|
|
return evsel__new_idx(&attr, evlist->core.nr_entries);
|
|
|
|
}
|
|
|
|
|
|
|
|
int evlist__add_dummy(struct evlist *evlist)
|
|
|
|
{
|
|
|
|
struct evsel *evsel = evlist__dummy_event(evlist);
|
2016-01-08 00:14:56 +08:00
|
|
|
|
|
|
|
if (evsel == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2019-07-21 19:23:59 +08:00
|
|
|
evlist__add(evlist, evsel);
|
2016-01-08 00:14:56 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-05-24 15:54:26 +08:00
|
|
|
/*
 * Add a dummy event configured for AUX-area tracing side-band
 * collection: user-space only (kernel/guest/hv excluded), fixed
 * period of 1 (no frequency mode), optionally system-wide, and
 * excluded from AUX sampling.  Returns the new evsel, already added
 * to @evlist, or NULL on allocation failure.
 */
struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
{
	struct evsel *evsel = evlist__dummy_event(evlist);

	if (!evsel)
		return NULL;

	evsel->core.attr.exclude_kernel = 1;
	evsel->core.attr.exclude_guest = 1;
	evsel->core.attr.exclude_hv = 1;
	evsel->core.attr.freq = 0;
	evsel->core.attr.sample_period = 1;
	evsel->core.system_wide = system_wide;
	evsel->no_aux_samples = true;
	evsel->name = strdup("dummy:u");

	evlist__add(evlist, evsel);
	return evsel;
}
|
|
|
|
|
perf build: Use libtraceevent from the system
Remove the LIBTRACEEVENT_DYNAMIC and LIBTRACEFS_DYNAMIC make command
line variables.
If libtraceevent isn't installed or NO_LIBTRACEEVENT=1 is passed to the
build, don't compile in libtraceevent and libtracefs support.
This also disables CONFIG_TRACE that controls "perf trace".
CONFIG_LIBTRACEEVENT is used to control enablement in Build/Makefiles,
HAVE_LIBTRACEEVENT is used in C code.
Without HAVE_LIBTRACEEVENT tracepoints are disabled and as such the
commands kmem, kwork, lock, sched and timechart are removed. The
majority of commands continue to work including "perf test".
Committer notes:
Fixed up a tools/perf/util/Build reject and added:
#include <traceevent/event-parse.h>
to tools/perf/util/scripting-engines/trace-event-perl.c.
Committer testing:
$ rpm -qi libtraceevent-devel
Name : libtraceevent-devel
Version : 1.5.3
Release : 2.fc36
Architecture: x86_64
Install Date: Mon 25 Jul 2022 03:20:19 PM -03
Group : Unspecified
Size : 27728
License : LGPLv2+ and GPLv2+
Signature : RSA/SHA256, Fri 15 Apr 2022 02:11:58 PM -03, Key ID 999f7cbf38ab71f4
Source RPM : libtraceevent-1.5.3-2.fc36.src.rpm
Build Date : Fri 15 Apr 2022 10:57:01 AM -03
Build Host : buildvm-x86-05.iad2.fedoraproject.org
Packager : Fedora Project
Vendor : Fedora Project
URL : https://git.kernel.org/pub/scm/libs/libtrace/libtraceevent.git/
Bug URL : https://bugz.fedoraproject.org/libtraceevent
Summary : Development headers of libtraceevent
Description :
Development headers of libtraceevent-libs
$
Default build:
$ ldd ~/bin/perf | grep tracee
libtraceevent.so.1 => /lib64/libtraceevent.so.1 (0x00007f1dcaf8f000)
$
# perf trace -e sched:* --max-events 10
0.000 migration/0/17 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, dest_cpu: 1)
0.005 migration/0/17 sched:sched_wake_idle_without_ipi(cpu: 1)
0.011 migration/0/17 sched:sched_switch(prev_comm: "", prev_pid: 17 (migration/0), prev_state: 1, next_comm: "", next_prio: 120)
1.173 :0/0 sched:sched_wakeup(comm: "", pid: 3138 (gnome-terminal-), prio: 120)
1.180 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 3138 (gnome-terminal-), next_prio: 120)
0.156 migration/1/21 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, orig_cpu: 1, dest_cpu: 2)
0.160 migration/1/21 sched:sched_wake_idle_without_ipi(cpu: 2)
0.166 migration/1/21 sched:sched_switch(prev_comm: "", prev_pid: 21 (migration/1), prev_state: 1, next_comm: "", next_prio: 120)
1.183 :0/0 sched:sched_wakeup(comm: "", pid: 1602985 (kworker/u16:0-f), prio: 120, target_cpu: 1)
1.186 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 1602985 (kworker/u16:0-f), next_prio: 120)
#
Had to tweak tools/perf/util/setup.py to make sure the python binding
shared object links with libtraceevent if -DHAVE_LIBTRACEEVENT is
present in CFLAGS.
Building with NO_LIBTRACEEVENT=1 uncovered some more build failures:
- Make building of data-convert-bt.c to CONFIG_LIBTRACEEVENT=y
- perf-$(CONFIG_LIBTRACEEVENT) += scripts/
- bpf_kwork.o needs also to be dependent on CONFIG_LIBTRACEEVENT=y
- The python binding needed some fixups and util/trace-event.c can't be
built and linked with the python binding shared object, so remove it
in tools/perf/util/setup.py and exclude it from the list of
dependencies in the python/perf.so Makefile.perf target.
Building without libtraceevent-devel installed uncovered more build
failures:
- The python binding tools/perf/util/python.c was assuming that
traceevent/parse-events.h was always available, which was the case
when we defaulted to using the in-kernel tools/lib/traceevent/ files,
now we need to enclose it under ifdef HAVE_LIBTRACEEVENT, just like
the other parts of it that deal with tracepoints.
- We have to ifdef the rules in the Build files with
CONFIG_LIBTRACEEVENT=y to build builtin-trace.c and
tools/perf/trace/beauty/ as we only ifdef setting CONFIG_TRACE=y when
setting NO_LIBTRACEEVENT=1 in the make command line, not when we don't
detect libtraceevent-devel installed in the system. Simplification here
to avoid these two ways of disabling builtin-trace.c and not having
CONFIG_TRACE=y when libtraceevent-devel isn't installed is the clean
way.
From Athira:
<quote>
tools/perf/arch/powerpc/util/Build
-perf-y += kvm-stat.o
+perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
</quote>
Then, ditto for arm64 and s390, detected by container cross build tests.
- s/390 uses test__checkevent_tracepoint() that is now only available if
HAVE_LIBTRACEEVENT is defined, enclose the callsite with ifder HAVE_LIBTRACEEVENT.
Also from Athira:
<quote>
With this change, I could successfully compile in these environment:
- Without libtraceevent-devel installed
- With libtraceevent-devel installed
- With “make NO_LIBTRACEEVENT=1”
</quote>
Then, finally rename CONFIG_TRACEEVENT to CONFIG_LIBTRACEEVENT for
consistency with other libraries detected in tools/perf/.
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Tested-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: http://lore.kernel.org/lkml/20221205225940.3079667-3-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-12-06 06:59:39 +08:00
|
|
|
#ifdef HAVE_LIBTRACEEVENT
/*
 * Add a sched:sched_switch tracepoint event to @evlist, sampling CPU
 * and TIME, optionally system-wide, and excluded from AUX sampling.
 * Returns the new evsel, or an ERR_PTR value if the tracepoint evsel
 * could not be created (propagated from evsel__newtp_idx()).
 */
struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide)
{
	struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0);

	if (IS_ERR(evsel))
		return evsel;

	evsel__set_sample_bit(evsel, CPU);
	evsel__set_sample_bit(evsel, TIME);

	evsel->core.system_wide = system_wide;
	evsel->no_aux_samples = true;

	evlist__add(evlist, evsel);
	return evsel;
}
#endif
|
2022-10-04 04:46:46 +08:00
|
|
|
|
perf x86 evlist: Add default hybrid events for perf stat
Provide a new solution to replace the reverted commit ac2dc29edd21f9ec
("perf stat: Add default hybrid events")
For the default software attrs, nothing is changed.
For the default hardware attrs, create a new evsel for each hybrid pmu.
With the new solution, adding a new default attr will not require the
special support for the hybrid platform anymore.
Also, the "--detailed" is supported on the hybrid platform
With the patch,
$ perf stat -a -ddd sleep 1
Performance counter stats for 'system wide':
32,231.06 msec cpu-clock # 32.056 CPUs utilized
529 context-switches # 16.413 /sec
32 cpu-migrations # 0.993 /sec
69 page-faults # 2.141 /sec
176,754,151 cpu_core/cycles/ # 5.484 M/sec (41.65%)
161,695,280 cpu_atom/cycles/ # 5.017 M/sec (49.92%)
48,595,992 cpu_core/instructions/ # 1.508 M/sec (49.98%)
32,363,337 cpu_atom/instructions/ # 1.004 M/sec (58.26%)
10,088,639 cpu_core/branches/ # 313.010 K/sec (58.31%)
6,390,582 cpu_atom/branches/ # 198.274 K/sec (58.26%)
846,201 cpu_core/branch-misses/ # 26.254 K/sec (66.65%)
676,477 cpu_atom/branch-misses/ # 20.988 K/sec (58.27%)
14,290,070 cpu_core/L1-dcache-loads/ # 443.363 K/sec (66.66%)
9,983,532 cpu_atom/L1-dcache-loads/ # 309.749 K/sec (58.27%)
740,725 cpu_core/L1-dcache-load-misses/ # 22.982 K/sec (66.66%)
<not supported> cpu_atom/L1-dcache-load-misses/
480,441 cpu_core/LLC-loads/ # 14.906 K/sec (66.67%)
326,570 cpu_atom/LLC-loads/ # 10.132 K/sec (58.27%)
329 cpu_core/LLC-load-misses/ # 10.208 /sec (66.68%)
0 cpu_atom/LLC-load-misses/ # 0.000 /sec (58.32%)
<not supported> cpu_core/L1-icache-loads/
21,982,491 cpu_atom/L1-icache-loads/ # 682.028 K/sec (58.43%)
4,493,189 cpu_core/L1-icache-load-misses/ # 139.406 K/sec (33.34%)
4,711,404 cpu_atom/L1-icache-load-misses/ # 146.176 K/sec (50.08%)
13,713,090 cpu_core/dTLB-loads/ # 425.462 K/sec (33.34%)
9,384,727 cpu_atom/dTLB-loads/ # 291.170 K/sec (50.08%)
157,387 cpu_core/dTLB-load-misses/ # 4.883 K/sec (33.33%)
108,328 cpu_atom/dTLB-load-misses/ # 3.361 K/sec (50.08%)
<not supported> cpu_core/iTLB-loads/
<not supported> cpu_atom/iTLB-loads/
37,655 cpu_core/iTLB-load-misses/ # 1.168 K/sec (33.32%)
61,661 cpu_atom/iTLB-load-misses/ # 1.913 K/sec (50.03%)
<not supported> cpu_core/L1-dcache-prefetches/
<not supported> cpu_atom/L1-dcache-prefetches/
<not supported> cpu_core/L1-dcache-prefetch-misses/
<not supported> cpu_atom/L1-dcache-prefetch-misses/
1.005466919 seconds time elapsed
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Acked-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220721065706.2886112-5-zhengjun.xing@linux.intel.com
Signed-off-by: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-07-21 14:57:05 +08:00
|
|
|
int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
|
2011-11-04 19:10:59 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel, *n;
|
2011-11-04 19:10:59 +08:00
|
|
|
LIST_HEAD(head);
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < nr_attrs; i++) {
|
2020-05-07 00:27:04 +08:00
|
|
|
evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
|
2011-11-04 19:10:59 +08:00
|
|
|
if (evsel == NULL)
|
|
|
|
goto out_delete_partial_list;
|
2019-07-21 19:24:22 +08:00
|
|
|
list_add_tail(&evsel->core.node, &head);
|
2011-11-04 19:10:59 +08:00
|
|
|
}
|
|
|
|
|
2020-12-01 01:52:44 +08:00
|
|
|
evlist__splice_list_tail(evlist, &head);
|
2011-11-04 19:10:59 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_delete_partial_list:
|
2016-06-23 22:26:15 +08:00
|
|
|
__evlist__for_each_entry_safe(&head, n, evsel)
|
2019-07-21 19:23:57 +08:00
|
|
|
evsel__delete(evsel);
|
2011-11-04 19:10:59 +08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-06-17 20:16:20 +08:00
|
|
|
/*
 * Like evlist__add_attrs(), but first runs event_attr_init() on each
 * template so the tool-wide defaults (e.g. the exclude_guest bit) are
 * applied, matching the attributes that parsed events receive.
 */
int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	/* Normalize every attr before any evsel is created from it. */
	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}
|
|
|
|
|
2022-07-21 14:57:04 +08:00
|
|
|
/*
 * Weak generic implementation; architectures (e.g. x86 for Topdown
 * metrics) may override it to supply arch-specific default events.
 * The generic version just forwards non-empty attribute arrays to
 * __evlist__add_default_attrs().
 */
__weak int arch_evlist__add_default_attrs(struct evlist *evlist,
					  struct perf_event_attr *attrs,
					  size_t nr_attrs)
{
	/* Nothing to add: succeed without touching the evlist. */
	if (!nr_attrs)
		return 0;

	return __evlist__add_default_attrs(evlist, attrs, nr_attrs);
}
|
|
|
|
|
2020-11-30 20:48:07 +08:00
|
|
|
struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
|
perf tools: Save some loops using perf_evlist__id2evsel
Since we already ask for PERF_SAMPLE_ID and use it to quickly find the
associated evsel, add handler func + data to struct perf_evsel to avoid
using chains of if(strcmp(event_name)) and also to avoid all the linear
list searches via trace_event_find.
To demonstrate the technique convert 'perf sched' to it:
# perf sched record sleep 5m
And then:
Performance counter stats for '/tmp/oldperf sched lat':
646.929438 task-clock # 0.999 CPUs utilized
9 context-switches # 0.000 M/sec
0 CPU-migrations # 0.000 M/sec
20,901 page-faults # 0.032 M/sec
1,290,144,450 cycles # 1.994 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1,606,158,439 instructions # 1.24 insns per cycle
339,088,395 branches # 524.151 M/sec
4,550,735 branch-misses # 1.34% of all branches
0.647524759 seconds time elapsed
Versus:
Performance counter stats for 'perf sched lat':
473.564691 task-clock # 0.999 CPUs utilized
9 context-switches # 0.000 M/sec
0 CPU-migrations # 0.000 M/sec
20,903 page-faults # 0.044 M/sec
944,367,984 cycles # 1.994 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1,442,385,571 instructions # 1.53 insns per cycle
308,383,106 branches # 651.195 M/sec
4,481,784 branch-misses # 1.45% of all branches
0.474215751 seconds time elapsed
[root@emilia ~]#
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-1kbzpl74lwi6lavpqke2u2p3@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2011-11-29 03:57:40 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
perf tools: Save some loops using perf_evlist__id2evsel
Since we already ask for PERF_SAMPLE_ID and use it to quickly find the
associated evsel, add handler func + data to struct perf_evsel to avoid
using chains of if(strcmp(event_name)) and also to avoid all the linear
list searches via trace_event_find.
To demonstrate the technique convert 'perf sched' to it:
# perf sched record sleep 5m
And then:
Performance counter stats for '/tmp/oldperf sched lat':
646.929438 task-clock # 0.999 CPUs utilized
9 context-switches # 0.000 M/sec
0 CPU-migrations # 0.000 M/sec
20,901 page-faults # 0.032 M/sec
1,290,144,450 cycles # 1.994 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1,606,158,439 instructions # 1.24 insns per cycle
339,088,395 branches # 524.151 M/sec
4,550,735 branch-misses # 1.34% of all branches
0.647524759 seconds time elapsed
Versus:
Performance counter stats for 'perf sched lat':
473.564691 task-clock # 0.999 CPUs utilized
9 context-switches # 0.000 M/sec
0 CPU-migrations # 0.000 M/sec
20,903 page-faults # 0.044 M/sec
944,367,984 cycles # 1.994 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1,442,385,571 instructions # 1.53 insns per cycle
308,383,106 branches # 651.195 M/sec
4,481,784 branch-misses # 1.45% of all branches
0.474215751 seconds time elapsed
[root@emilia ~]#
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-1kbzpl74lwi6lavpqke2u2p3@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2011-11-29 03:57:40 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2019-07-21 19:24:29 +08:00
|
|
|
if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
|
|
|
|
(int)evsel->core.attr.config == id)
|
perf tools: Save some loops using perf_evlist__id2evsel
Since we already ask for PERF_SAMPLE_ID and use it to quickly find the
associated evsel, add handler func + data to struct perf_evsel to avoid
using chains of if(strcmp(event_name)) and also to avoid all the linear
list searches via trace_event_find.
To demonstrate the technique convert 'perf sched' to it:
# perf sched record sleep 5m
And then:
Performance counter stats for '/tmp/oldperf sched lat':
646.929438 task-clock # 0.999 CPUs utilized
9 context-switches # 0.000 M/sec
0 CPU-migrations # 0.000 M/sec
20,901 page-faults # 0.032 M/sec
1,290,144,450 cycles # 1.994 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1,606,158,439 instructions # 1.24 insns per cycle
339,088,395 branches # 524.151 M/sec
4,550,735 branch-misses # 1.34% of all branches
0.647524759 seconds time elapsed
Versus:
Performance counter stats for 'perf sched lat':
473.564691 task-clock # 0.999 CPUs utilized
9 context-switches # 0.000 M/sec
0 CPU-migrations # 0.000 M/sec
20,903 page-faults # 0.044 M/sec
944,367,984 cycles # 1.994 GHz
<not supported> stalled-cycles-frontend
<not supported> stalled-cycles-backend
1,442,385,571 instructions # 1.53 insns per cycle
308,383,106 branches # 651.195 M/sec
4,481,784 branch-misses # 1.45% of all branches
0.474215751 seconds time elapsed
[root@emilia ~]#
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-1kbzpl74lwi6lavpqke2u2p3@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2011-11-29 03:57:40 +08:00
|
|
|
return evsel;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-11-30 20:48:07 +08:00
|
|
|
struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
|
2013-08-29 12:29:51 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2013-08-29 12:29:51 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2019-07-21 19:24:29 +08:00
|
|
|
if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
|
2013-08-29 12:29:51 +08:00
|
|
|
(strcmp(evsel->name, name) == 0))
|
|
|
|
return evsel;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
perf build: Use libtraceevent from the system
Remove the LIBTRACEEVENT_DYNAMIC and LIBTRACEFS_DYNAMIC make command
line variables.
If libtraceevent isn't installed or NO_LIBTRACEEVENT=1 is passed to the
build, don't compile in libtraceevent and libtracefs support.
This also disables CONFIG_TRACE that controls "perf trace".
CONFIG_LIBTRACEEVENT is used to control enablement in Build/Makefiles,
HAVE_LIBTRACEEVENT is used in C code.
Without HAVE_LIBTRACEEVENT tracepoints are disabled and as such the
commands kmem, kwork, lock, sched and timechart are removed. The
majority of commands continue to work including "perf test".
Committer notes:
Fixed up a tools/perf/util/Build reject and added:
#include <traceevent/event-parse.h>
to tools/perf/util/scripting-engines/trace-event-perl.c.
Committer testing:
$ rpm -qi libtraceevent-devel
Name : libtraceevent-devel
Version : 1.5.3
Release : 2.fc36
Architecture: x86_64
Install Date: Mon 25 Jul 2022 03:20:19 PM -03
Group : Unspecified
Size : 27728
License : LGPLv2+ and GPLv2+
Signature : RSA/SHA256, Fri 15 Apr 2022 02:11:58 PM -03, Key ID 999f7cbf38ab71f4
Source RPM : libtraceevent-1.5.3-2.fc36.src.rpm
Build Date : Fri 15 Apr 2022 10:57:01 AM -03
Build Host : buildvm-x86-05.iad2.fedoraproject.org
Packager : Fedora Project
Vendor : Fedora Project
URL : https://git.kernel.org/pub/scm/libs/libtrace/libtraceevent.git/
Bug URL : https://bugz.fedoraproject.org/libtraceevent
Summary : Development headers of libtraceevent
Description :
Development headers of libtraceevent-libs
$
Default build:
$ ldd ~/bin/perf | grep tracee
libtraceevent.so.1 => /lib64/libtraceevent.so.1 (0x00007f1dcaf8f000)
$
# perf trace -e sched:* --max-events 10
0.000 migration/0/17 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, dest_cpu: 1)
0.005 migration/0/17 sched:sched_wake_idle_without_ipi(cpu: 1)
0.011 migration/0/17 sched:sched_switch(prev_comm: "", prev_pid: 17 (migration/0), prev_state: 1, next_comm: "", next_prio: 120)
1.173 :0/0 sched:sched_wakeup(comm: "", pid: 3138 (gnome-terminal-), prio: 120)
1.180 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 3138 (gnome-terminal-), next_prio: 120)
0.156 migration/1/21 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, orig_cpu: 1, dest_cpu: 2)
0.160 migration/1/21 sched:sched_wake_idle_without_ipi(cpu: 2)
0.166 migration/1/21 sched:sched_switch(prev_comm: "", prev_pid: 21 (migration/1), prev_state: 1, next_comm: "", next_prio: 120)
1.183 :0/0 sched:sched_wakeup(comm: "", pid: 1602985 (kworker/u16:0-f), prio: 120, target_cpu: 1)
1.186 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 1602985 (kworker/u16:0-f), next_prio: 120)
#
Had to tweak tools/perf/util/setup.py to make sure the python binding
shared object links with libtraceevent if -DHAVE_LIBTRACEEVENT is
present in CFLAGS.
Building with NO_LIBTRACEEVENT=1 uncovered some more build failures:
- Make building of data-convert-bt.c to CONFIG_LIBTRACEEVENT=y
- perf-$(CONFIG_LIBTRACEEVENT) += scripts/
- bpf_kwork.o needs also to be dependent on CONFIG_LIBTRACEEVENT=y
- The python binding needed some fixups and util/trace-event.c can't be
built and linked with the python binding shared object, so remove it
in tools/perf/util/setup.py and exclude it from the list of
dependencies in the python/perf.so Makefile.perf target.
Building without libtraceevent-devel installed uncovered more build
failures:
- The python binding tools/perf/util/python.c was assuming that
traceevent/parse-events.h was always available, which was the case
when we defaulted to using the in-kernel tools/lib/traceevent/ files,
now we need to enclose it under ifdef HAVE_LIBTRACEEVENT, just like
the other parts of it that deal with tracepoints.
- We have to ifdef the rules in the Build files with
CONFIG_LIBTRACEEVENT=y to build builtin-trace.c and
tools/perf/trace/beauty/ as we only ifdef setting CONFIG_TRACE=y when
setting NO_LIBTRACEEVENT=1 in the make command line, not when we don't
detect libtraceevent-devel installed in the system. Simplification here
to avoid these two ways of disabling builtin-trace.c and not having
CONFIG_TRACE=y when libtraceevent-devel isn't installed is the clean
way.
From Athira:
<quote>
tools/perf/arch/powerpc/util/Build
-perf-y += kvm-stat.o
+perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
</quote>
Then, ditto for arm64 and s390, detected by container cross build tests.
- s/390 uses test__checkevent_tracepoint() that is now only available if
HAVE_LIBTRACEEVENT is defined, enclose the callsite with ifdef HAVE_LIBTRACEEVENT.
Also from Athira:
<quote>
With this change, I could successfully compile in these environment:
- Without libtraceevent-devel installed
- With libtraceevent-devel installed
- With “make NO_LIBTRACEEVENT=1”
</quote>
Then, finally rename CONFIG_TRACEEVENT to CONFIG_LIBTRACEEVENT for
consistency with other libraries detected in tools/perf/.
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Tested-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: http://lore.kernel.org/lkml/20221205225940.3079667-3-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-12-06 06:59:39 +08:00
|
|
|
#ifdef HAVE_LIBTRACEEVENT
|
2020-06-17 20:16:20 +08:00
|
|
|
int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
|
2012-10-03 22:40:22 +08:00
|
|
|
{
|
2020-05-07 00:27:04 +08:00
|
|
|
struct evsel *evsel = evsel__newtp(sys, name);
|
2012-10-03 22:40:22 +08:00
|
|
|
|
2015-09-07 16:38:06 +08:00
|
|
|
if (IS_ERR(evsel))
|
2012-10-03 22:40:22 +08:00
|
|
|
return -1;
|
|
|
|
|
2013-11-06 21:17:38 +08:00
|
|
|
evsel->handler = handler;
|
2019-07-21 19:23:59 +08:00
|
|
|
evlist__add(evlist, evsel);
|
2012-10-03 22:40:22 +08:00
|
|
|
return 0;
|
|
|
|
}
|
perf build: Use libtraceevent from the system
Remove the LIBTRACEEVENT_DYNAMIC and LIBTRACEFS_DYNAMIC make command
line variables.
If libtraceevent isn't installed or NO_LIBTRACEEVENT=1 is passed to the
build, don't compile in libtraceevent and libtracefs support.
This also disables CONFIG_TRACE that controls "perf trace".
CONFIG_LIBTRACEEVENT is used to control enablement in Build/Makefiles,
HAVE_LIBTRACEEVENT is used in C code.
Without HAVE_LIBTRACEEVENT tracepoints are disabled and as such the
commands kmem, kwork, lock, sched and timechart are removed. The
majority of commands continue to work including "perf test".
Committer notes:
Fixed up a tools/perf/util/Build reject and added:
#include <traceevent/event-parse.h>
to tools/perf/util/scripting-engines/trace-event-perl.c.
Committer testing:
$ rpm -qi libtraceevent-devel
Name : libtraceevent-devel
Version : 1.5.3
Release : 2.fc36
Architecture: x86_64
Install Date: Mon 25 Jul 2022 03:20:19 PM -03
Group : Unspecified
Size : 27728
License : LGPLv2+ and GPLv2+
Signature : RSA/SHA256, Fri 15 Apr 2022 02:11:58 PM -03, Key ID 999f7cbf38ab71f4
Source RPM : libtraceevent-1.5.3-2.fc36.src.rpm
Build Date : Fri 15 Apr 2022 10:57:01 AM -03
Build Host : buildvm-x86-05.iad2.fedoraproject.org
Packager : Fedora Project
Vendor : Fedora Project
URL : https://git.kernel.org/pub/scm/libs/libtrace/libtraceevent.git/
Bug URL : https://bugz.fedoraproject.org/libtraceevent
Summary : Development headers of libtraceevent
Description :
Development headers of libtraceevent-libs
$
Default build:
$ ldd ~/bin/perf | grep tracee
libtraceevent.so.1 => /lib64/libtraceevent.so.1 (0x00007f1dcaf8f000)
$
# perf trace -e sched:* --max-events 10
0.000 migration/0/17 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, dest_cpu: 1)
0.005 migration/0/17 sched:sched_wake_idle_without_ipi(cpu: 1)
0.011 migration/0/17 sched:sched_switch(prev_comm: "", prev_pid: 17 (migration/0), prev_state: 1, next_comm: "", next_prio: 120)
1.173 :0/0 sched:sched_wakeup(comm: "", pid: 3138 (gnome-terminal-), prio: 120)
1.180 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 3138 (gnome-terminal-), next_prio: 120)
0.156 migration/1/21 sched:sched_migrate_task(comm: "", pid: 1603763 (perf), prio: 120, orig_cpu: 1, dest_cpu: 2)
0.160 migration/1/21 sched:sched_wake_idle_without_ipi(cpu: 2)
0.166 migration/1/21 sched:sched_switch(prev_comm: "", prev_pid: 21 (migration/1), prev_state: 1, next_comm: "", next_prio: 120)
1.183 :0/0 sched:sched_wakeup(comm: "", pid: 1602985 (kworker/u16:0-f), prio: 120, target_cpu: 1)
1.186 :0/0 sched:sched_switch(prev_comm: "", prev_prio: 120, next_comm: "", next_pid: 1602985 (kworker/u16:0-f), next_prio: 120)
#
Had to tweak tools/perf/util/setup.py to make sure the python binding
shared object links with libtraceevent if -DHAVE_LIBTRACEEVENT is
present in CFLAGS.
Building with NO_LIBTRACEEVENT=1 uncovered some more build failures:
- Make building of data-convert-bt.c to CONFIG_LIBTRACEEVENT=y
- perf-$(CONFIG_LIBTRACEEVENT) += scripts/
- bpf_kwork.o needs also to be dependent on CONFIG_LIBTRACEEVENT=y
- The python binding needed some fixups and util/trace-event.c can't be
built and linked with the python binding shared object, so remove it
in tools/perf/util/setup.py and exclude it from the list of
dependencies in the python/perf.so Makefile.perf target.
Building without libtraceevent-devel installed uncovered more build
failures:
- The python binding tools/perf/util/python.c was assuming that
traceevent/parse-events.h was always available, which was the case
when we defaulted to using the in-kernel tools/lib/traceevent/ files,
now we need to enclose it under ifdef HAVE_LIBTRACEEVENT, just like
the other parts of it that deal with tracepoints.
- We have to ifdef the rules in the Build files with
CONFIG_LIBTRACEEVENT=y to build builtin-trace.c and
tools/perf/trace/beauty/ as we only ifdef setting CONFIG_TRACE=y when
setting NO_LIBTRACEEVENT=1 in the make command line, not when we don't
detect libtraceevent-devel installed in the system. Simplification here
to avoid these two ways of disabling builtin-trace.c and not having
CONFIG_TRACE=y when libtraceevent-devel isn't installed is the clean
way.
From Athira:
<quote>
tools/perf/arch/powerpc/util/Build
-perf-y += kvm-stat.o
+perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
</quote>
Then, ditto for arm64 and s390, detected by container cross build tests.
- s/390 uses test__checkevent_tracepoint() that is now only available if
HAVE_LIBTRACEEVENT is defined, enclose the callsite with ifder HAVE_LIBTRACEEVENT.
Also from Athira:
<quote>
With this change, I could successfully compile in these environment:
- Without libtraceevent-devel installed
- With libtraceevent-devel installed
- With “make NO_LIBTRACEEVENT=1”
</quote>
Then, finally rename CONFIG_TRACEEVENT to CONFIG_LIBTRACEEVENT for
consistency with other libraries detected in tools/perf/.
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Tested-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: http://lore.kernel.org/lkml/20221205225940.3079667-3-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-12-06 06:59:39 +08:00
|
|
|
#endif
|
2012-10-03 22:40:22 +08:00
|
|
|
|
2022-01-05 14:13:37 +08:00
|
|
|
/*
 * Begin iteration over the (evsel, cpu) pairs of @evlist: CPUs are walked
 * in evlist->core.all_cpus order (outer), evsels within each CPU (inner).
 * If @affinity is non-NULL the calling thread is migrated to the first CPU
 * via affinity__set(), so per-CPU work done by the caller runs locally.
 */
struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
{
	struct evlist_cpu_iterator itr = {
		.container = evlist,
		.evsel = NULL,
		.cpu_map_idx = 0,
		.evlist_cpu_map_idx = 0,
		.evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus),
		/* No CPU selected yet. */
		.cpu = (struct perf_cpu){ .cpu = -1},
		.affinity = affinity,
	};

	if (evlist__empty(evlist)) {
		/* Ensure the empty list doesn't iterate. */
		itr.evlist_cpu_map_idx = itr.evlist_cpu_map_nr;
	} else {
		itr.evsel = evlist__first(evlist);
		if (itr.affinity) {
			itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
			affinity__set(itr.affinity, itr.cpu.cpu);
			itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu);
			/*
			 * If this CPU isn't in the evsel's cpu map then advance
			 * through the list.
			 */
			if (itr.cpu_map_idx == -1)
				evlist_cpu_iterator__next(&itr);
		}
	}
	return itr;
}
|
|
|
|
|
2022-01-05 14:13:37 +08:00
|
|
|
/*
 * Advance the iterator to the next (evsel, cpu) pair: first try the
 * remaining evsels on the current CPU, then move to the next CPU in
 * all_cpus and restart from the first evsel.  Evsels whose cpu map does
 * not contain the current CPU are skipped (cpu_map_idx == -1).
 */
void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr)
{
	/* Try the evsels that follow the current one on this CPU. */
	while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) {
		evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		if (evlist_cpu_itr->cpu_map_idx != -1)
			return;
	}
	/* Evsels exhausted for this CPU: step to the next CPU in all_cpus. */
	evlist_cpu_itr->evlist_cpu_map_idx++;
	if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) {
		evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container);
		evlist_cpu_itr->cpu =
			perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus,
					  evlist_cpu_itr->evlist_cpu_map_idx);
		/* Migrate the calling thread to the new CPU when requested. */
		if (evlist_cpu_itr->affinity)
			affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		/*
		 * If this CPU isn't in the evsel's cpu map then advance through
		 * the list.
		 */
		if (evlist_cpu_itr->cpu_map_idx == -1)
			evlist_cpu_iterator__next(evlist_cpu_itr);
	}
}
|
|
|
|
|
2022-01-05 14:13:37 +08:00
|
|
|
bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr)
|
2019-11-21 08:15:15 +08:00
|
|
|
{
|
2022-01-05 14:13:37 +08:00
|
|
|
return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr;
|
2019-11-21 08:15:15 +08:00
|
|
|
}
|
|
|
|
|
2020-12-11 04:43:28 +08:00
|
|
|
static int evsel__strcmp(struct evsel *pos, char *evsel_name)
|
|
|
|
{
|
|
|
|
if (!evsel_name)
|
|
|
|
return 0;
|
|
|
|
if (evsel__is_dummy_event(pos))
|
|
|
|
return 1;
|
|
|
|
return strcmp(pos->name, evsel_name);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int evlist__is_enabled(struct evlist *evlist)
|
|
|
|
{
|
|
|
|
struct evsel *pos;
|
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, pos) {
|
|
|
|
if (!evsel__is_group_leader(pos) || !pos->core.fd)
|
|
|
|
continue;
|
|
|
|
/* If at least one event is enabled, evlist is enabled. */
|
|
|
|
if (!pos->disabled)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2022-08-24 15:28:13 +08:00
|
|
|
/*
 * Disable events in the evlist, per-CPU to minimize cross-CPU IPIs.
 *
 * @evsel_name: when non-NULL, only the event with this name is disabled and
 *              evlist->enabled is recomputed afterwards.
 * @excl_dummy: when true, dummy events are left enabled.
 *
 * Events marked 'immediate' are disabled last (second pass of the imm loop).
 * Affinity setup is skipped for a dummy CPU map (pid targets) to avoid a
 * needless sched_getaffinity() call.
 */
static void __evlist__disable(struct evlist *evlist, char *evsel_name, bool excl_dummy)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;
	bool has_imm = false;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	/* Disable 'immediate' events last */
	for (int imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
			pos = evlist_cpu_itr.evsel;
			if (evsel__strcmp(pos, evsel_name))
				continue;
			if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			if (excl_dummy && evsel__is_dummy_event(pos))
				continue;
			if (pos->immediate)
				has_imm = true;
			/* First pass disables non-immediate, second pass immediate. */
			if (pos->immediate != imm)
				continue;
			evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
		}
		/* No immediate events seen: the second pass would be a no-op. */
		if (!has_imm)
			break;
	}

	affinity__cleanup(affinity);
	/* Record the new state on each evsel that was disabled above. */
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		if (excl_dummy && evsel__is_dummy_event(pos))
			continue;
		pos->disabled = true;
	}

	/*
	 * If we disabled only single event, we need to check
	 * the enabled state of the evlist manually.
	 */
	if (evsel_name)
		evlist->enabled = evlist__is_enabled(evlist);
	else
		evlist->enabled = false;
}
|
|
|
|
|
2020-12-11 04:43:28 +08:00
|
|
|
/* Disable all events in the evlist (no name filter, dummies included). */
void evlist__disable(struct evlist *evlist)
{
	__evlist__disable(evlist, NULL, false);
}
|
|
|
|
|
|
|
|
/* Disable all events except dummy events (kept for sideband records). */
void evlist__disable_non_dummy(struct evlist *evlist)
{
	__evlist__disable(evlist, NULL, true);
}
|
|
|
|
|
|
|
|
/* Disable only the event named @evsel_name; evlist->enabled is recomputed. */
void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__disable(evlist, evsel_name, false);
}
|
|
|
|
|
2022-08-24 15:28:13 +08:00
|
|
|
/*
 * Enable events in the evlist, per-CPU to minimize cross-CPU IPIs.
 *
 * @evsel_name: when non-NULL, only the event with this name is enabled.
 * @excl_dummy: when true, dummy events are skipped.
 *
 * Affinity setup is skipped for a dummy CPU map (pid targets) to avoid a
 * needless sched_getaffinity() call.
 */
static void __evlist__enable(struct evlist *evlist, char *evsel_name, bool excl_dummy)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
		pos = evlist_cpu_itr.evsel;
		if (evsel__strcmp(pos, evsel_name))
			continue;
		/* Only opened group leaders are enabled directly. */
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		if (excl_dummy && evsel__is_dummy_event(pos))
			continue;
		evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
	}
	affinity__cleanup(affinity);
	/* Record the new state on each evsel that was enabled above. */
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		if (excl_dummy && evsel__is_dummy_event(pos))
			continue;
		pos->disabled = false;
	}

	/*
	 * Even single event sets the 'enabled' for evlist,
	 * so the toggle can work properly and toggle to
	 * 'disabled' state.
	 */
	evlist->enabled = true;
}
|
|
|
|
|
2020-12-11 04:43:28 +08:00
|
|
|
/* Enable all events in the evlist (no name filter, dummies included). */
void evlist__enable(struct evlist *evlist)
{
	__evlist__enable(evlist, NULL, false);
}
|
|
|
|
|
|
|
|
/* Enable all events except dummy events. */
void evlist__enable_non_dummy(struct evlist *evlist)
{
	__evlist__enable(evlist, NULL, true);
}
|
|
|
|
|
|
|
|
/* Enable only the event named @evsel_name. */
void evlist__enable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__enable(evlist, evsel_name, false);
}
|
|
|
|
|
2020-11-30 20:33:55 +08:00
|
|
|
void evlist__toggle_enable(struct evlist *evlist)
|
2015-06-18 03:40:26 +08:00
|
|
|
{
|
2019-07-21 19:24:09 +08:00
|
|
|
(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
|
2011-08-26 00:17:55 +08:00
|
|
|
}
|
|
|
|
|
2019-08-16 22:00:45 +08:00
|
|
|
/* Add @fd to the evlist's pollfd array with default flags, waiting for POLLIN. */
int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
}
|
|
|
|
|
2019-08-16 22:00:45 +08:00
|
|
|
/* Drop pollfd entries whose revents match @revents_and_mask; returns remaining count. */
int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}
|
|
|
|
|
2021-02-05 14:50:01 +08:00
|
|
|
#ifdef HAVE_EVENTFD_SUPPORT
|
|
|
|
int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
|
|
|
|
{
|
|
|
|
return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
|
2022-08-24 15:28:11 +08:00
|
|
|
fdarray_flag__nonfilterable |
|
|
|
|
fdarray_flag__non_perf_event);
|
2021-02-05 14:50:01 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2019-09-01 04:48:33 +08:00
|
|
|
/* Poll the evlist's file descriptors; @timeout in milliseconds as for poll(2). */
int evlist__poll(struct evlist *evlist, int timeout)
{
	return perf_evlist__poll(&evlist->core, timeout);
}
|
|
|
|
|
2020-12-01 01:17:57 +08:00
|
|
|
/*
 * Look up the sample-id descriptor for @id in the evlist's hash table.
 * Returns NULL when the id is unknown.
 */
struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	/* Hash the 64-bit id down to a bucket index. */
	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}
|
|
|
|
|
2020-12-01 01:17:57 +08:00
|
|
|
/*
 * Map a sample id to its evsel.
 *
 * Falls back to the first evsel for single-event lists, a zero id, or when
 * sample_id_all is off (older kernels don't put ids in non-sample records).
 * Returns NULL only when the id is unknown and ids are expected.
 */
struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}
|
2011-01-15 20:40:59 +08:00
|
|
|
|
2020-12-01 01:17:57 +08:00
|
|
|
/*
 * Strict variant of evlist__id2evsel(): no first-evsel fallback.
 * Returns NULL unless @id is non-zero and found in the id hash.
 */
struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}
|
|
|
|
|
2020-12-01 01:17:57 +08:00
|
|
|
/*
 * Extract the sample id from a raw event record into *@id.
 *
 * For PERF_RECORD_SAMPLE the id sits at offset id_pos from the start of the
 * payload; for other record types is_pos is an offset from the END of the
 * record (hence the different bound check and the n -= is_pos indexing).
 * Returns 0 on success, -1 if the record is too small to hold an id.
 */
static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	/* Number of u64 words in the payload after the header. */
	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
|
|
|
|
|
2020-12-01 01:17:57 +08:00
|
|
|
/*
 * Resolve which evsel produced a raw event record.
 *
 * Short-circuits to the first evsel for single-event lists, for non-sample
 * records when sample_id_all is off, and for synthesized events (id == 0);
 * otherwise hashes the extracted id and walks the bucket.  Returns NULL if
 * the id cannot be extracted or is not found.
 */
struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}
|
|
|
|
|
2020-12-01 01:49:05 +08:00
|
|
|
/*
 * Pause (@value true) or resume ring-buffer output on every overwrite mmap
 * via the PERF_EVENT_IOC_PAUSE_OUTPUT ioctl.  A no-op (returns 0) when there
 * are no overwrite mmaps; otherwise returns the first ioctl error, or 0.
 */
static int evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		/* Skip mmaps that were never opened. */
		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}
|
|
|
|
|
2020-12-01 01:49:05 +08:00
|
|
|
/* Pause output on all overwrite ring buffers. */
static int evlist__pause(struct evlist *evlist)
{
	return evlist__set_paused(evlist, true);
}
|
|
|
|
|
2020-12-01 01:49:05 +08:00
|
|
|
/* Resume output on all overwrite ring buffers. */
static int evlist__resume(struct evlist *evlist)
{
	return evlist__set_paused(evlist, false);
}
|
|
|
|
|
2019-08-16 22:19:55 +08:00
|
|
|
/*
 * Unmap every ring buffer in @evlist, both the regular and the overwrite
 * (backward) ones, without freeing the evlist->mmap/overwrite_mmap arrays
 * themselves (hence "_nofree" - see evlist__munmap() for the freeing variant).
 */
static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}
|
perf evlist: Fix per thread mmap setup
The PERF_EVENT_IOC_SET_OUTPUT ioctl was returning -EINVAL when using
--pid when monitoring multithreaded apps, as we can only share a ring
buffer for events on the same thread if not doing per cpu.
Fix it by using per thread ring buffers.
Tested with:
[root@felicio ~]# tuna -t 26131 -CP | nl
1 thread ctxt_switches
2 pid SCHED_ rtpri affinity voluntary nonvoluntary cmd
3 26131 OTHER 0 0,1 10814276 2397830 chromium-browse
4 642 OTHER 0 0,1 14688 0 chromium-browse
5 26148 OTHER 0 0,1 713602 115479 chromium-browse
6 26149 OTHER 0 0,1 801958 2262 chromium-browse
7 26150 OTHER 0 0,1 1271128 248 chromium-browse
8 26151 OTHER 0 0,1 3 0 chromium-browse
9 27049 OTHER 0 0,1 36796 9 chromium-browse
10 618 OTHER 0 0,1 14711 0 chromium-browse
11 661 OTHER 0 0,1 14593 0 chromium-browse
12 29048 OTHER 0 0,1 28125 0 chromium-browse
13 26143 OTHER 0 0,1 2202789 781 chromium-browse
[root@felicio ~]#
So 11 threads under pid 26131, then:
[root@felicio ~]# perf record -F 50000 --pid 26131
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7fa4a2538000-7fa4a25b9000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
2 7fa4a25b9000-7fa4a263a000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
3 7fa4a263a000-7fa4a26bb000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
4 7fa4a26bb000-7fa4a273c000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
5 7fa4a273c000-7fa4a27bd000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
6 7fa4a27bd000-7fa4a283e000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
7 7fa4a283e000-7fa4a28bf000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
8 7fa4a28bf000-7fa4a2940000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
9 7fa4a2940000-7fa4a29c1000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
10 7fa4a29c1000-7fa4a2a42000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
11 7fa4a2a42000-7fa4a2ac3000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
11 mmaps, one per thread since we didn't specify any CPU list, so we need one
mmap per thread and:
[root@felicio ~]# perf record -F 50000 --pid 26131
^M
^C[ perf record: Woken up 79 times to write data ]
[ perf record: Captured and wrote 20.614 MB perf.data (~900639 samples) ]
[root@felicio ~]# perf report -D | grep PERF_RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort -n | uniq -c | sort -nr | nl
1 371310 26131
2 96516 26148
3 95694 26149
4 95203 26150
5 7291 26143
6 87 27049
7 76 661
8 60 29048
9 47 618
10 43 642
[root@felicio ~]#
Ok, one of the threads, 26151 was quiescent, so no samples there, but all the
others are there.
Then, if I specify one CPU:
[root@felicio ~]# perf record -F 50000 --pid 26131 --cpu 1
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.680 MB perf.data (~29730 samples) ]
[root@felicio ~]# perf report -D | grep PERF_RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort -n | uniq -c | sort -nr | nl
1 8444 26131
2 2584 26149
3 2518 26148
4 2324 26150
5 123 26143
6 9 661
7 9 29048
[root@felicio ~]#
This machine has two cores, so fewer threads appeared on the radar, and:
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7f484b922000-7f484b9a3000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
Just one mmap, as now we can use just one per-cpu buffer instead of the
per-thread needed in the previous case.
For global profiling:
[root@felicio ~]# perf record -F 50000 -a
^C[ perf record: Woken up 26 times to write data ]
[ perf record: Captured and wrote 7.128 MB perf.data (~311412 samples) ]
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7fb49b435000-7fb49b4b6000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
2 7fb49b4b6000-7fb49b537000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
It uses per-cpu buffers.
For just one thread:
[root@felicio ~]# perf record -F 50000 --tid 26148
^C[ perf record: Woken up 2 times to write data ]
[ perf record: Captured and wrote 0.330 MB perf.data (~14426 samples) ]
[root@felicio ~]# perf report -D | grep PERF_RECORD_SAMPLE | cut -d/ -f2 | cut -d: -f1 | sort -n | uniq -c | sort -nr | nl
1 9969 26148
[root@felicio ~]#
[root@felicio ~]# grep perf_event /proc/`pidof perf`/maps | nl
1 7f286a51b000-7f286a59c000 rwxs 00000000 00:09 4064 anon_inode:[perf_event]
[root@felicio ~]#
Tested-by: David Ahern <dsahern@gmail.com>
Tested-by: Lin Ming <ming.m.lin@intel.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Link: http://lkml.kernel.org/r/20110426204401.GB1746@ghostprotocols.net
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2011-05-15 20:39:00 +08:00
|
|
|
|
2019-08-16 22:19:55 +08:00
|
|
|
/*
 * Tear down all ring buffers: unmap them first, then free (and NULL out,
 * via zfree) the mmap arrays. Order matters: the arrays must still be
 * valid while their entries are being unmapped.
 */
void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}
|
|
|
|
|
2019-10-07 20:53:15 +08:00
|
|
|
static void perf_mmap__unmap_cb(struct perf_mmap *map)
|
|
|
|
{
|
|
|
|
struct mmap *m = container_of(map, struct mmap, core);
|
|
|
|
|
|
|
|
mmap__munmap(m);
|
|
|
|
}
|
|
|
|
|
2019-08-16 22:21:46 +08:00
|
|
|
/*
 * Allocate and initialize the array of nr_mmaps tool-side struct mmap
 * entries (regular or overwrite, per @overwrite). Returns NULL on
 * allocation failure; the caller owns the returned array.
 */
static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		/* Entries are chained: each one knows its predecessor. */
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}
|
|
|
|
|
2019-10-07 20:53:26 +08:00
|
|
|
/*
 * libperf mmap callback, invoked per (evsel, mmap index) during map setup:
 * recover the tool-side wrappers and point the auxtrace mmap parameters at
 * the given index.
 */
static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_evsel *_evsel,
			 struct perf_mmap_param *_mp,
			 int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
	struct evsel *evsel = container_of(_evsel, struct evsel, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx);
}
|
|
|
|
|
2019-10-07 20:53:27 +08:00
|
|
|
/*
 * libperf mmap callback: return the perf_mmap for slot @idx, lazily
 * allocating the tool-side mmap array (regular or overwrite, per @overwrite)
 * on first use. Returns NULL if the allocation fails.
 */
static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			/* First overwrite map created: mark backward maps as running. */
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}
|
|
|
|
|
2019-10-07 20:53:28 +08:00
|
|
|
static int
|
|
|
|
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
|
2022-01-05 14:13:51 +08:00
|
|
|
int output, struct perf_cpu cpu)
|
2019-10-07 20:53:28 +08:00
|
|
|
{
|
|
|
|
struct mmap *map = container_of(_map, struct mmap, core);
|
|
|
|
struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
|
|
|
|
|
|
|
|
return mmap__mmap(map, mp, output, cpu);
|
|
|
|
}
|
|
|
|
|
2016-04-16 04:46:31 +08:00
|
|
|
unsigned long perf_event_mlock_kb_in_pages(void)
|
2013-09-01 18:36:12 +08:00
|
|
|
{
|
2016-04-16 04:46:31 +08:00
|
|
|
unsigned long pages;
|
|
|
|
int max;
|
2014-12-13 04:46:45 +08:00
|
|
|
|
2016-04-16 04:46:31 +08:00
|
|
|
if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
|
|
|
|
/*
|
|
|
|
* Pick a once upon a time good value, i.e. things look
|
|
|
|
* strange since we can't read a sysctl value, but lets not
|
|
|
|
* die yet...
|
|
|
|
*/
|
|
|
|
max = 512;
|
|
|
|
} else {
|
|
|
|
max -= (page_size / 1024);
|
|
|
|
}
|
2014-12-13 04:46:45 +08:00
|
|
|
|
2016-04-16 04:46:31 +08:00
|
|
|
pages = (max * 1024) / page_size;
|
|
|
|
if (!is_power_of_2(pages))
|
|
|
|
pages = rounddown_pow_of_two(pages);
|
|
|
|
|
|
|
|
return pages;
|
|
|
|
}
|
|
|
|
|
2019-07-28 18:45:35 +08:00
|
|
|
size_t evlist__mmap_size(unsigned long pages)
|
2016-04-16 04:46:31 +08:00
|
|
|
{
|
|
|
|
if (pages == UINT_MAX)
|
|
|
|
pages = perf_event_mlock_kb_in_pages();
|
|
|
|
else if (!is_power_of_2(pages))
|
2013-09-01 18:36:12 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
return (pages + 1) * page_size;
|
|
|
|
}
|
|
|
|
|
2013-11-12 22:46:55 +08:00
|
|
|
/*
 * Parse a --mmap-pages style argument: either a size with a B/K/M/G suffix
 * (converted to a page count) or a plain decimal page count. Values that
 * are not a power of two are rounded up, with a notice printed. A result of
 * 0 is only accepted when @min == 0. Returns the page count in [0, @max]
 * or -EINVAL on bad input.
 */
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		/* reject trailing junk after the number */
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}
|
|
|
|
|
2020-12-01 02:09:45 +08:00
|
|
|
/*
 * Parse @str as an mmap pages argument into *@mmap_pages, capping the
 * maximum so that the resulting byte size fits in a size_t. Prints an
 * error and returns -1 on invalid input, 0 on success.
 */
int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	/* Don't let (pages * page_size) overflow size_t. */
	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}
|
|
|
|
|
2020-12-01 02:09:45 +08:00
|
|
|
/* Option-parser callback wrapper around __evlist__parse_mmap_pages(). */
int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused)
{
	return __evlist__parse_mmap_pages(opt->value, str);
}
|
|
|
|
|
2013-10-18 20:29:12 +08:00
|
|
|
/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages - auxtrace map length in pages
 * @auxtrace_overwrite - overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
		    unsigned int auxtrace_pages,
		    bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
		    int comp_level)
{
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks	= nr_cblocks,
		.affinity	= affinity,
		.flush		= flush,
		.comp_level	= comp_level
	};
	/* Tool-side callbacks libperf uses to drive the mapping. */
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	/* Set up auxtrace parameters before libperf walks the maps. */
	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}
|
2011-01-30 21:59:43 +08:00
|
|
|
|
2019-07-28 18:45:35 +08:00
|
|
|
/*
 * Simple wrapper around evlist__mmap_ex() with defaults: no auxtrace pages,
 * no overwrite of auxtrace data, nr_cblocks 0, system affinity, flush 1 and
 * comp_level 0.
 */
int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}
|
|
|
|
|
2020-12-01 01:56:52 +08:00
|
|
|
/*
 * Build the cpu and thread maps described by @target and install them on
 * @evlist. The maps are reference counted: perf_evlist__set_maps() takes
 * its own references, so the local ones are dropped before returning.
 * Returns 0 on success, -1 on failure.
 */
int evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If specify '-a' and '--per-thread' to perf record, perf record
	 * will override '--per-thread'. target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If specify '--per-thread' only to perf record,
	 * target->per_thread = true and target->system_wide = false.
	 *
	 * So target->per_thread && target->system_wide is false.
	 * For perf record, thread_map__new_str doesn't call
	 * thread_map__new_all_cpus. That will keep perf record's
	 * current behavior.
	 *
	 * For perf stat, it allows the case that target->per_thread and
	 * target->system_wide are all true. It means to collect system-wide
	 * per-thread data. thread_map__new_str will call
	 * thread_map__new_all_cpus to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list && !target->hybrid;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/* as evlist now has references, put count here */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}
|
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
/*
 * Apply each evsel's stored filter string to its perf events. On the first
 * failure, stores the offending evsel in *@err_evsel and returns the error;
 * returns 0 when every filter applied cleanly.
 */
int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * filters only work for tracepoint event, which doesn't have cpu limit.
		 * So evlist and evsel should always be same.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}
|
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
int evlist__set_tp_filter(struct evlist *evlist, const char *filter)
|
2012-09-27 02:07:39 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2012-09-27 02:07:39 +08:00
|
|
|
int err = 0;
|
|
|
|
|
2019-10-08 03:43:03 +08:00
|
|
|
if (filter == NULL)
|
|
|
|
return -1;
|
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2019-07-21 19:24:29 +08:00
|
|
|
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
|
2016-02-26 17:31:53 +08:00
|
|
|
continue;
|
|
|
|
|
2020-04-30 03:19:05 +08:00
|
|
|
err = evsel__set_filter(evsel, filter);
|
2012-09-27 02:07:39 +08:00
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
2011-02-26 11:51:54 +08:00
|
|
|
}
|
2011-05-21 23:49:00 +08:00
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
int evlist__append_tp_filter(struct evlist *evlist, const char *filter)
|
2019-10-08 03:52:17 +08:00
|
|
|
{
|
|
|
|
struct evsel *evsel;
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
if (filter == NULL)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
|
|
|
if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
|
|
|
|
continue;
|
|
|
|
|
2020-04-30 03:19:05 +08:00
|
|
|
err = evsel__append_tp_filter(evsel, filter);
|
2019-10-08 03:52:17 +08:00
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2019-10-15 07:10:50 +08:00
|
|
|
/*
 * Build a tracepoint filter expression excluding every pid in @pids, e.g.
 * "common_pid != 1 && common_pid != 2". Returns a malloc()ed string the
 * caller must free(), or NULL on allocation failure or when @npids is 0.
 */
char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	/*
	 * Fix: filter was previously uninitialized, so with npids == 0 an
	 * indeterminate pointer was returned (and later free()d) - UB.
	 */
	char *filter = NULL;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			/* Replace the partial expression with the longer one. */
			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}
|
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
/*
 * Replace the tracepoint filter of every tracepoint event in @evlist
 * with one that excludes all pids in @pids.
 *
 * If building the filter string fails, NULL is passed down and
 * evlist__set_tp_filter() reports the error (-1).
 */
int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	int ret;
	char *filter;

	filter = asprintf__tp_filter_pids(npids, pids);
	ret = evlist__set_tp_filter(evlist, filter);
	free(filter);

	return ret;
}
|
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
/* Convenience wrapper: exclude a single @pid from all tracepoint events. */
int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__set_tp_filter_pids(evlist, 1, &pid);
}
|
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
/*
 * Append a "common_pid != ..." exclusion for all @pids to the existing
 * tracepoint filters in @evlist, rather than replacing them.
 */
int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	int ret;
	char *filter;

	filter = asprintf__tp_filter_pids(npids, pids);
	ret = evlist__append_tp_filter(evlist, filter);
	free(filter);

	return ret;
}
|
|
|
|
|
2020-11-30 20:38:02 +08:00
|
|
|
/* Convenience wrapper: append an exclusion for a single @pid. */
int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__append_tp_filter_pids(evlist, 1, &pid);
}
|
|
|
|
|
2020-06-17 20:24:21 +08:00
|
|
|
bool evlist__valid_sample_type(struct evlist *evlist)
|
2011-05-21 23:49:00 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *pos;
|
2011-06-02 22:04:54 +08:00
|
|
|
|
2019-07-21 19:24:28 +08:00
|
|
|
if (evlist->core.nr_entries == 1)
|
2013-08-27 16:23:09 +08:00
|
|
|
return true;
|
|
|
|
|
|
|
|
if (evlist->id_pos < 0 || evlist->is_pos < 0)
|
|
|
|
return false;
|
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, pos) {
|
2013-08-27 16:23:09 +08:00
|
|
|
if (pos->id_pos != evlist->id_pos ||
|
|
|
|
pos->is_pos != evlist->is_pos)
|
2011-06-02 22:04:54 +08:00
|
|
|
return false;
|
2011-05-21 23:49:00 +08:00
|
|
|
}
|
|
|
|
|
2011-06-02 22:04:54 +08:00
|
|
|
return true;
|
2011-05-21 23:49:00 +08:00
|
|
|
}
|
|
|
|
|
2020-06-17 20:24:21 +08:00
|
|
|
/*
 * OR together the sample_type bits of every event in @evlist.
 * The result is memoized in evlist->combined_sample_type: if it is
 * already non-zero it is returned as-is, so callers wanting a
 * recomputation must go through evlist__combined_sample_type().
 */
u64 __evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	/* Cached value from a previous call, return it untouched. */
	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}
|
|
|
|
|
2020-06-17 20:24:21 +08:00
|
|
|
/*
 * Force a fresh computation of the combined sample_type by clearing
 * the memoized value before delegating to the cached variant.
 */
u64 evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __evlist__combined_sample_type(evlist);
}
|
|
|
|
|
2020-06-17 20:31:25 +08:00
|
|
|
u64 evlist__combined_branch_type(struct evlist *evlist)
|
2015-07-18 23:24:47 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2015-07-18 23:24:47 +08:00
|
|
|
u64 branch_type = 0;
|
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel)
|
2019-07-21 19:24:29 +08:00
|
|
|
branch_type |= evsel->core.attr.branch_sample_type;
|
2015-07-18 23:24:47 +08:00
|
|
|
return branch_type;
|
|
|
|
}
|
|
|
|
|
2020-12-01 02:07:49 +08:00
|
|
|
/*
 * Validate the read_format configuration of @evlist against the first
 * event. Differing read_formats across events are only reported via
 * pr_debug(), not treated as fatal. The one hard requirement checked
 * is that PERF_SAMPLE_READ comes with PERF_FORMAT_ID, since samples
 * carrying read values can only be attributed to an event by ID.
 */
bool evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		/* Mismatch is logged but deliberately not an error. */
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
		}
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}
|
|
|
|
|
2020-12-01 02:13:12 +08:00
|
|
|
/*
 * Size in bytes of the sample-id trailer appended to non-sample
 * events, derived from the first event's attr. Zero when
 * sample_id_all is not set (no trailer present).
 */
u16 evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	return first->core.attr.sample_id_all ? evsel__id_hdr_size(first) : 0;
}
|
|
|
|
|
2020-06-17 20:29:48 +08:00
|
|
|
bool evlist__valid_sample_id_all(struct evlist *evlist)
|
2011-05-21 23:49:00 +08:00
|
|
|
{
|
2019-09-03 16:39:52 +08:00
|
|
|
struct evsel *first = evlist__first(evlist), *pos = first;
|
2011-06-02 22:04:54 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry_continue(evlist, pos) {
|
2019-07-21 19:24:29 +08:00
|
|
|
if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
|
2011-06-02 22:04:54 +08:00
|
|
|
return false;
|
2011-05-21 23:49:00 +08:00
|
|
|
}
|
|
|
|
|
2011-06-02 22:04:54 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-06-17 20:29:48 +08:00
|
|
|
/*
 * Whether non-sample events carry a sample-id trailer, taken from the
 * first event (evlist__valid_sample_id_all() ensures they all agree).
 */
bool evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	return first->core.attr.sample_id_all;
}
|
2011-10-06 06:11:32 +08:00
|
|
|
|
2020-12-01 02:01:08 +08:00
|
|
|
/* Record @evsel as the currently selected event (used by UI browsers). */
void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
{
	evlist->selected = evsel;
}
|
2011-10-25 20:42:19 +08:00
|
|
|
|
2019-07-21 19:24:07 +08:00
|
|
|
/*
 * Close all perf event file descriptors in @evlist.
 *
 * When per-CPU maps are available, FDs are closed CPU by CPU using the
 * affinity helper so the closing thread runs on (or near) the CPU that
 * owns the events, minimizing cross-CPU IPIs. Otherwise it falls back
 * to a simple reverse walk closing each evsel whole.
 */
void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity affinity;

	/*
	 * With perf record core.user_requested_cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.user_requested_cpus ||
	    cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		/* Reverse order: undo opens in the opposite order they were made. */
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;

	/* Close one CPU's worth of FDs per (evsel, cpu) iteration step. */
	evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
		perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core,
				      evlist_cpu_itr.cpu_map_idx);
	}

	affinity__cleanup(&affinity);
	/* All FDs are closed now; release per-evsel FD and ID bookkeeping. */
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
	perf_evlist__reset_id_hash(&evlist->core);
}
|
|
|
|
|
2020-12-01 01:56:52 +08:00
|
|
|
static int evlist__create_syswide_maps(struct evlist *evlist)
|
2014-10-11 02:55:15 +08:00
|
|
|
{
|
2019-07-21 19:23:49 +08:00
|
|
|
struct perf_cpu_map *cpus;
|
2019-07-21 19:23:50 +08:00
|
|
|
struct perf_thread_map *threads;
|
2014-10-11 02:55:15 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Try reading /sys/devices/system/cpu/online to get
|
|
|
|
* an all cpus map.
|
|
|
|
*
|
|
|
|
* FIXME: -ENOMEM is the best we can do here, the cpu_map
|
|
|
|
* code needs an overhaul to properly forward the
|
|
|
|
* error, and we may not want to do that fallback to a
|
|
|
|
* default cpu identity map :-\
|
|
|
|
*/
|
2019-07-21 19:24:30 +08:00
|
|
|
cpus = perf_cpu_map__new(NULL);
|
2015-09-08 15:59:00 +08:00
|
|
|
if (!cpus)
|
2014-10-11 02:55:15 +08:00
|
|
|
goto out;
|
|
|
|
|
2019-07-21 19:24:19 +08:00
|
|
|
threads = perf_thread_map__new_dummy();
|
2015-09-08 15:59:00 +08:00
|
|
|
if (!threads)
|
|
|
|
goto out_put;
|
2014-10-11 02:55:15 +08:00
|
|
|
|
2019-07-21 19:24:43 +08:00
|
|
|
perf_evlist__set_maps(&evlist->core, cpus, threads);
|
2020-09-15 11:18:11 +08:00
|
|
|
|
|
|
|
perf_thread_map__put(threads);
|
2015-09-08 15:59:00 +08:00
|
|
|
out_put:
|
2019-07-21 19:24:17 +08:00
|
|
|
perf_cpu_map__put(cpus);
|
2020-09-15 11:18:11 +08:00
|
|
|
out:
|
2022-03-28 09:55:32 +08:00
|
|
|
return -ENOMEM;
|
2014-10-11 02:55:15 +08:00
|
|
|
}
|
|
|
|
|
2019-07-21 19:24:06 +08:00
|
|
|
/*
 * Open perf event FDs for every event in @evlist using each evsel's
 * own cpu/thread maps. On any failure all already-opened events are
 * closed, errno is set from the error, and the (negative) error code
 * is returned.
 */
int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
		err = evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	/* id_pos/is_pos depend on sample_type, recompute before opening. */
	evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	/* err is a negative error code; expose it via errno too. */
	errno = -err;
	return err;
}
|
2011-11-09 18:47:15 +08:00
|
|
|
|
2020-11-30 20:26:54 +08:00
|
|
|
/*
 * Fork the workload described by @argv but keep it "corked": the child
 * sets itself up and then blocks reading go_pipe until
 * evlist__start_workload() writes the single go byte, so events can be
 * enabled before the workload's first instruction.
 *
 * Two pipes coordinate parent and child:
 *   - child_ready_pipe: child closes its write end when set up; the
 *     parent's read() unblocks once the child is ready.
 *   - go_pipe: child blocks in read(); the parent keeps the write end
 *     as evlist->workload.cork_fd.
 *
 * @pipe_output: duplicate child stderr onto stdout (fd 2 -> 1).
 * @exec_error:  if non-NULL, installed as the parent's SIGUSR1 handler;
 *               the child signals exec failure via sigqueue(SIGUSR1)
 *               with errno in sival_int.
 *
 * Returns 0 on success, -1 on failure (pipes closed, no child left
 * corked).
 */
int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[],
			     bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		/* Child side. */
		int ret;

		if (pipe_output)
			dup2(2, 1);

		/* Undo any SIGTERM disposition inherited from perf itself. */
		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		/* Make sure the go fd does not leak into the exec'd workload. */
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Change the name of this process not to confuse --exclude-perf users
		 * that sees 'perf' in the window up to the execvp() and thinks that
		 * perf samples are not being excluded.
		 */
		prctl(PR_SET_NAME, "perf-exec");

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		/* Only reached if execvp() failed. */
		if (exec_error) {
			union sigval val;

			/* Ship the exec errno to the parent's SIGUSR1 handler. */
			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	/* Parent side. */
	if (exec_error) {
		struct sigaction act = {
			.sa_flags	= SA_SIGINFO,
			.sa_sigaction	= exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		/* Point the (dummy) thread map at the forked workload. */
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	/* Don't leak the cork fd into the workload once it execs. */
	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
|
|
|
|
|
2020-11-30 20:26:54 +08:00
|
|
|
/*
 * Release the workload prepared by evlist__prepare_workload(): write
 * the single go byte to cork_fd so the child's blocking read() returns
 * and it proceeds to execvp().
 *
 * Returns the write() result (1 on success, negative on error), or 0
 * if no workload was corked.
 */
int evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
|
2012-08-02 22:42:57 +08:00
|
|
|
|
2020-11-30 20:43:07 +08:00
|
|
|
/*
 * Parse @event into @sample using the evsel the event's ID maps to.
 *
 * Returns -EFAULT when no evsel matches the event, otherwise the
 * result of evsel__parse_sample(). When tracing guests (perf_guest)
 * and the sample carries an ID, the machine_pid/vcpu recorded for that
 * ID are copied into the sample for guest attribution.
 */
int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);
	int ret;

	if (!evsel)
		return -EFAULT;
	ret = evsel__parse_sample(evsel, event, sample);
	if (ret)
		return ret;
	if (perf_guest && sample->id) {
		struct perf_sample_id *sid = evlist__id2sid(evlist, sample->id);

		if (sid) {
			sample->machine_pid = sid->machine_pid;
			sample->vcpu = sid->vcpu.cpu;
		}
	}
	return 0;
}
|
2012-09-07 01:54:11 +08:00
|
|
|
|
2020-11-30 20:43:07 +08:00
|
|
|
/*
 * Extract only the timestamp of @event into @timestamp, using the
 * evsel the event belongs to. Returns -EFAULT when the event cannot be
 * matched to an evsel, otherwise whatever
 * evsel__parse_sample_timestamp() returns.
 */
int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
{
	struct evsel *evsel;

	evsel = evlist__event2evsel(evlist, event);
	if (evsel == NULL)
		return -EFAULT;

	return evsel__parse_sample_timestamp(evsel, event, timestamp);
}
|
|
|
|
|
2020-06-17 20:19:46 +08:00
|
|
|
int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
{
|
|
|
|
int printed, value;
|
tools: Introduce str_error_r()
The tools so far have been using the strerror_r() GNU variant, that
returns a string, be it the buffer passed or something else.
But that, besides being tricky in cases where we expect that the
function using strerror_r() returns the error formatted in a provided
buffer (we have to check if it returned something else and copy that
instead), breaks the build on systems not using glibc, like Alpine
Linux, where musl libc is used.
So, introduce yet another wrapper, str_error_r(), that has the GNU
interface, but uses the portable XSI variant of strerror_r(), so that
users rest asured that the provided buffer is used and it is what is
returned.
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-d4t42fnf48ytlk8rjxs822tf@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-07-06 22:56:20 +08:00
|
|
|
char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
|
|
|
|
switch (err) {
|
|
|
|
case EACCES:
|
|
|
|
case EPERM:
|
|
|
|
printed = scnprintf(buf, size,
|
|
|
|
"Error:\t%s.\n"
|
|
|
|
"Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
|
|
|
|
|
2013-12-11 20:36:23 +08:00
|
|
|
value = perf_event_paranoid();
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
|
|
|
|
printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
|
|
|
|
|
|
|
|
if (value >= 2) {
|
|
|
|
printed += scnprintf(buf + printed, size - printed,
|
|
|
|
"For your workloads it needs to be <= 1\nHint:\t");
|
|
|
|
}
|
|
|
|
printed += scnprintf(buf + printed, size - printed,
|
2014-06-11 04:18:54 +08:00
|
|
|
"For system wide tracing it needs to be set to -1.\n");
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
|
|
|
|
printed += scnprintf(buf + printed, size - printed,
|
2014-06-11 04:18:54 +08:00
|
|
|
"Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
|
|
|
|
"Hint:\tThe current value is %d.", value);
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
break;
|
2016-02-19 00:34:09 +08:00
|
|
|
case EINVAL: {
|
2019-09-03 16:39:52 +08:00
|
|
|
struct evsel *first = evlist__first(evlist);
|
2016-02-19 00:34:09 +08:00
|
|
|
int max_freq;
|
|
|
|
|
|
|
|
if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
|
|
|
|
goto out_default;
|
|
|
|
|
2019-07-21 19:24:29 +08:00
|
|
|
if (first->core.attr.sample_freq < (u64)max_freq)
|
2016-02-19 00:34:09 +08:00
|
|
|
goto out_default;
|
|
|
|
|
|
|
|
printed = scnprintf(buf, size,
|
|
|
|
"Error:\t%s.\n"
|
|
|
|
"Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
|
|
|
|
"Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
|
2019-07-21 19:24:29 +08:00
|
|
|
emsg, max_freq, first->core.attr.sample_freq);
|
2016-02-19 00:34:09 +08:00
|
|
|
break;
|
|
|
|
}
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
default:
|
2016-02-19 00:34:09 +08:00
|
|
|
out_default:
|
perf trace: Improve messages related to /proc/sys/kernel/perf_event_paranoid
kernel/events/core.c has:
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
So, with the default being 1, a non-root user can trace his stuff:
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
1
[acme@zoo ~]$ yes > /dev/null &
[1] 15338
[acme@zoo ~]$ trace -p 15338 | head -5
0.005 ( 0.005 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.045 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.085 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.125 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
0.165 ( 0.001 ms): write(fd: 1</dev/null>, buf: 0x7fe6db765000, count: 4096 ) = 4096
[acme@zoo ~]$
[acme@zoo ~]$ trace --duration 1 sleep 1
1002.148 (1001.218 ms): nanosleep(rqtp: 0x7fff46c79250 ) = 0
[acme@zoo ~]$
[acme@zoo ~]$ trace -- usleep 1 | tail -5
0.905 ( 0.002 ms): brk( ) = 0x1c82000
0.910 ( 0.003 ms): brk(brk: 0x1ca3000 ) = 0x1ca3000
0.913 ( 0.001 ms): brk( ) = 0x1ca3000
0.990 ( 0.059 ms): nanosleep(rqtp: 0x7fffe31a3280 ) = 0
0.995 ( 0.000 ms): exit_group(
[acme@zoo ~]$
But can't do system wide tracing:
[acme@zoo ~]$ trace
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 0
Error: Operation not permitted.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 1.
[acme@zoo ~]$
If the paranoid level is >= 2, i.e. turn this perf stuff off for !root users:
[acme@zoo ~]$ sudo sh -c 'echo 2 > /proc/sys/kernel/perf_event_paranoid'
[acme@zoo ~]$ cat /proc/sys/kernel/perf_event_paranoid
2
[acme@zoo ~]$
[acme@zoo ~]$ trace usleep 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
[acme@zoo ~]$ trace --cpu 1
Error: Permission denied.
Hint: Check /proc/sys/kernel/perf_event_paranoid setting.
Hint: For your workloads it needs to be <= 1
Hint: For system wide tracing it needs to be set to -1.
Hint: The current value is 2.
[acme@zoo ~]$
If the user manages to get what he/she wants, convincing root not
to be paranoid at all...
[root@zoo ~]# echo -1 > /proc/sys/kernel/perf_event_paranoid
[root@zoo ~]# cat /proc/sys/kernel/perf_event_paranoid
-1
[root@zoo ~]#
[acme@zoo ~]$ ps -eo user,pid,comm | grep Xorg
root 729 Xorg
[acme@zoo ~]$
[acme@zoo ~]$ trace -a --duration 0.001 -e \!select,ioctl,writev | grep Xorg | head -5
23.143 ( 0.003 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.152 ( 0.004 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = 8
23.161 ( 0.002 ms): Xorg/729 read(fd: 31, buf: 0x2544af0, count: 4096 ) = -1 EAGAIN Resource temporarily unavailable
23.175 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
23.235 ( 0.002 ms): Xorg/729 setitimer(which: REAL, value: 0x7fffaadf16e0 ) = 0
[acme@zoo ~]$
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-di28olfwd28rvkox7v3hqhu1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 04:38:29 +08:00
|
|
|
scnprintf(buf, size, "%s", emsg);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2013-12-11 20:36:35 +08:00
|
|
|
|
2020-06-17 20:19:46 +08:00
|
|
|
int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
|
2014-12-12 05:03:01 +08:00
|
|
|
{
|
tools: Introduce str_error_r()
The tools so far have been using the strerror_r() GNU variant, that
returns a string, be it the buffer passed or something else.
But that, besides being tricky in cases where we expect that the
function using strerror_r() returns the error formatted in a provided
buffer (we have to check if it returned something else and copy that
instead), breaks the build on systems not using glibc, like Alpine
Linux, where musl libc is used.
So, introduce yet another wrapper, str_error_r(), that has the GNU
interface, but uses the portable XSI variant of strerror_r(), so that
users rest asured that the provided buffer is used and it is what is
returned.
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-d4t42fnf48ytlk8rjxs822tf@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-07-06 22:56:20 +08:00
|
|
|
char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
|
2019-08-06 21:14:05 +08:00
|
|
|
int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;
|
2014-12-12 05:03:01 +08:00
|
|
|
|
|
|
|
switch (err) {
|
|
|
|
case EPERM:
|
2014-12-13 02:59:51 +08:00
|
|
|
sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
|
2014-12-13 03:25:33 +08:00
|
|
|
printed += scnprintf(buf + printed, size - printed,
|
|
|
|
"Error:\t%s.\n"
|
2014-12-12 05:03:01 +08:00
|
|
|
"Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
|
2014-12-13 03:25:33 +08:00
|
|
|
"Hint:\tTried using %zd kB.\n",
|
2014-12-13 02:59:51 +08:00
|
|
|
emsg, pages_max_per_user, pages_attempted);
|
2014-12-13 03:25:33 +08:00
|
|
|
|
|
|
|
if (pages_attempted >= pages_max_per_user) {
|
|
|
|
printed += scnprintf(buf + printed, size - printed,
|
|
|
|
"Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
|
|
|
|
pages_max_per_user + pages_attempted);
|
|
|
|
}
|
|
|
|
|
|
|
|
printed += scnprintf(buf + printed, size - printed,
|
|
|
|
"Hint:\tTry using a smaller -m/--mmap-pages value.");
|
2014-12-12 05:03:01 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
scnprintf(buf, size, "%s", emsg);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-12-01 01:52:44 +08:00
|
|
|
void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel)
|
2013-12-11 20:36:35 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel, *n;
|
2013-12-11 20:36:35 +08:00
|
|
|
LIST_HEAD(move);
|
|
|
|
|
2019-09-03 16:39:52 +08:00
|
|
|
if (move_evsel == evlist__first(evlist))
|
2013-12-11 20:36:35 +08:00
|
|
|
return;
|
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry_safe(evlist, n, evsel) {
|
2021-07-06 23:17:00 +08:00
|
|
|
if (evsel__leader(evsel) == evsel__leader(move_evsel))
|
2019-07-21 19:24:22 +08:00
|
|
|
list_move_tail(&evsel->core.node, &move);
|
2013-12-11 20:36:35 +08:00
|
|
|
}
|
|
|
|
|
2019-07-21 19:24:23 +08:00
|
|
|
list_splice(&move, &evlist->core.entries);
|
2013-12-11 20:36:35 +08:00
|
|
|
}
|
2014-07-31 14:00:52 +08:00
|
|
|
|
2020-12-01 01:39:41 +08:00
|
|
|
struct evsel *evlist__get_tracking_event(struct evlist *evlist)
|
2020-06-29 17:19:51 +08:00
|
|
|
{
|
|
|
|
struct evsel *evsel;
|
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
|
|
|
if (evsel->tracking)
|
|
|
|
return evsel;
|
|
|
|
}
|
|
|
|
|
|
|
|
return evlist__first(evlist);
|
|
|
|
}
|
|
|
|
|
2020-12-01 01:39:41 +08:00
|
|
|
void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel)
|
2014-07-31 14:00:52 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2014-07-31 14:00:52 +08:00
|
|
|
|
|
|
|
if (tracking_evsel->tracking)
|
|
|
|
return;
|
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2014-07-31 14:00:52 +08:00
|
|
|
if (evsel != tracking_evsel)
|
|
|
|
evsel->tracking = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
tracking_evsel->tracking = true;
|
|
|
|
}
|
perf tools: Enable passing event to BPF object
A new syntax is added to the parser so that the user can access
predefined perf events in BPF objects.
After this patch, BPF programs for perf are finally able to utilize
bpf_perf_event_read() introduced in commit 35578d798400 ("bpf: Implement
function bpf_perf_event_read() that get the selected hardware PMU
counter").
Test result:
# cat test_bpf_map_2.c
/************************ BEGIN **************************/
#include <uapi/linux/bpf.h>
#define SEC(NAME) __attribute__((section(NAME), used))
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
};
static int (*trace_printk)(const char *fmt, int fmt_size, ...) =
(void *)BPF_FUNC_trace_printk;
static int (*get_smp_processor_id)(void) =
(void *)BPF_FUNC_get_smp_processor_id;
static int (*perf_event_read)(struct bpf_map_def *, int) =
(void *)BPF_FUNC_perf_event_read;
struct bpf_map_def SEC("maps") pmu_map = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = __NR_CPUS__,
};
SEC("func_write=sys_write")
int func_write(void *ctx)
{
unsigned long long val;
char fmt[] = "sys_write: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
SEC("func_write_return=sys_write%return")
int func_write_return(void *ctx)
{
unsigned long long val = 0;
char fmt[] = "sys_write_return: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
char _license[] SEC("license") = "GPL";
int _version SEC("version") = LINUX_VERSION_CODE;
/************************* END ***************************/
Normal case:
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.013 MB perf.data (7 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17066 [000] d... 938449.863301: : sys_write: pmu=1157327
ls-17066 [000] dN.. 938449.863342: : sys_write_return: pmu=1225218
ls-17066 [000] d... 938449.863349: : sys_write: pmu=1241922
ls-17066 [000] dN.. 938449.863369: : sys_write_return: pmu=1267445
Normal case (system wide):
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -a
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.811 MB perf.data (120 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep -v '18446744073709551594' | grep -v perf | head -n 20
[SNIP]
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
gmain-30828 [002] d... 2740551.068992: : sys_write: pmu=84373
gmain-30828 [002] d... 2740551.068992: : sys_write_return: pmu=87696
gmain-30828 [002] d... 2740551.068996: : sys_write: pmu=100658
gmain-30828 [002] d... 2740551.068997: : sys_write_return: pmu=102572
Error case 1:
# perf record -e './test_bpf_map_2.c' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.014 MB perf.data ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17115 [007] d... 2724279.665625: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665651: : sys_write_return: pmu=18446744073709551614
ls-17115 [007] d... 2724279.665658: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665677: : sys_write_return: pmu=18446744073709551614
(18446744073709551614 is 0xfffffffffffffffe (-2))
Error case 2:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=evt/' -a
event syntax error: '..ps:pmu_map.event=evt/'
\___ Event not found for map setting
Hint: Valid config terms:
map:[<arraymap>].value=[value]
map:[<eventmap>].event=[event]
[SNIP]
Error case 3:
# ls /proc/2348/task/
2348 2505 2506 2507 2508
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -p 2348
ERROR: Apply config to BPF failed: Cannot set event to BPF map in multi-thread tracing
Error case 4:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
ERROR: Apply config to BPF failed: Doesn't support inherit event (Hint: use -i to turn off inherit)
Error case 5:
# perf record -i -e raw_syscalls:sys_enter -e './test_bpf_map_2.c/map:pmu_map.event=raw_syscalls:sys_enter/' ls
ERROR: Apply config to BPF failed: Can only put raw, hardware and BPF output event into a BPF map
Error case 6:
# perf record -i -e './test_bpf_map_2.c/map:pmu_map.event=123/' ls /
event syntax error: '.._map.event=123/'
\___ Incorrect value type for map
[SNIP]
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Cody P Schafer <dev@codyps.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jeremie Galarneau <jeremie.galarneau@efficios.com>
Cc: Kirill Smelkov <kirr@nexedi.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1456132275-98875-7-git-send-email-wangnan0@huawei.com
Signed-off-by: He Kuang <hekuang@huawei.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-02-22 17:10:33 +08:00
|
|
|
|
2020-11-30 20:48:07 +08:00
|
|
|
struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
|
perf tools: Enable passing event to BPF object
A new syntax is added to the parser so that the user can access
predefined perf events in BPF objects.
After this patch, BPF programs for perf are finally able to utilize
bpf_perf_event_read() introduced in commit 35578d798400 ("bpf: Implement
function bpf_perf_event_read() that get the selected hardware PMU
counter").
Test result:
# cat test_bpf_map_2.c
/************************ BEGIN **************************/
#include <uapi/linux/bpf.h>
#define SEC(NAME) __attribute__((section(NAME), used))
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
};
static int (*trace_printk)(const char *fmt, int fmt_size, ...) =
(void *)BPF_FUNC_trace_printk;
static int (*get_smp_processor_id)(void) =
(void *)BPF_FUNC_get_smp_processor_id;
static int (*perf_event_read)(struct bpf_map_def *, int) =
(void *)BPF_FUNC_perf_event_read;
struct bpf_map_def SEC("maps") pmu_map = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = __NR_CPUS__,
};
SEC("func_write=sys_write")
int func_write(void *ctx)
{
unsigned long long val;
char fmt[] = "sys_write: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
SEC("func_write_return=sys_write%return")
int func_write_return(void *ctx)
{
unsigned long long val = 0;
char fmt[] = "sys_write_return: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
char _license[] SEC("license") = "GPL";
int _version SEC("version") = LINUX_VERSION_CODE;
/************************* END ***************************/
Normal case:
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.013 MB perf.data (7 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17066 [000] d... 938449.863301: : sys_write: pmu=1157327
ls-17066 [000] dN.. 938449.863342: : sys_write_return: pmu=1225218
ls-17066 [000] d... 938449.863349: : sys_write: pmu=1241922
ls-17066 [000] dN.. 938449.863369: : sys_write_return: pmu=1267445
Normal case (system wide):
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -a
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.811 MB perf.data (120 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep -v '18446744073709551594' | grep -v perf | head -n 20
[SNIP]
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
gmain-30828 [002] d... 2740551.068992: : sys_write: pmu=84373
gmain-30828 [002] d... 2740551.068992: : sys_write_return: pmu=87696
gmain-30828 [002] d... 2740551.068996: : sys_write: pmu=100658
gmain-30828 [002] d... 2740551.068997: : sys_write_return: pmu=102572
Error case 1:
# perf record -e './test_bpf_map_2.c' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.014 MB perf.data ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17115 [007] d... 2724279.665625: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665651: : sys_write_return: pmu=18446744073709551614
ls-17115 [007] d... 2724279.665658: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665677: : sys_write_return: pmu=18446744073709551614
(18446744073709551614 is 0xfffffffffffffffe (-2))
Error case 2:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=evt/' -a
event syntax error: '..ps:pmu_map.event=evt/'
\___ Event not found for map setting
Hint: Valid config terms:
map:[<arraymap>].value=[value]
map:[<eventmap>].event=[event]
[SNIP]
Error case 3:
# ls /proc/2348/task/
2348 2505 2506 2507 2508
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -p 2348
ERROR: Apply config to BPF failed: Cannot set event to BPF map in multi-thread tracing
Error case 4:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
ERROR: Apply config to BPF failed: Doesn't support inherit event (Hint: use -i to turn off inherit)
Error case 5:
# perf record -i -e raw_syscalls:sys_enter -e './test_bpf_map_2.c/map:pmu_map.event=raw_syscalls:sys_enter/' ls
ERROR: Apply config to BPF failed: Can only put raw, hardware and BPF output event into a BPF map
Error case 6:
# perf record -i -e './test_bpf_map_2.c/map:pmu_map.event=123/' ls /
event syntax error: '.._map.event=123/'
\___ Incorrect value type for map
[SNIP]
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Cody P Schafer <dev@codyps.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jeremie Galarneau <jeremie.galarneau@efficios.com>
Cc: Kirill Smelkov <kirr@nexedi.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1456132275-98875-7-git-send-email-wangnan0@huawei.com
Signed-off-by: He Kuang <hekuang@huawei.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-02-22 17:10:33 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
perf tools: Enable passing event to BPF object
A new syntax is added to the parser so that the user can access
predefined perf events in BPF objects.
After this patch, BPF programs for perf are finally able to utilize
bpf_perf_event_read() introduced in commit 35578d798400 ("bpf: Implement
function bpf_perf_event_read() that get the selected hardware PMU
counter").
Test result:
# cat test_bpf_map_2.c
/************************ BEGIN **************************/
#include <uapi/linux/bpf.h>
#define SEC(NAME) __attribute__((section(NAME), used))
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
};
static int (*trace_printk)(const char *fmt, int fmt_size, ...) =
(void *)BPF_FUNC_trace_printk;
static int (*get_smp_processor_id)(void) =
(void *)BPF_FUNC_get_smp_processor_id;
static int (*perf_event_read)(struct bpf_map_def *, int) =
(void *)BPF_FUNC_perf_event_read;
struct bpf_map_def SEC("maps") pmu_map = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = __NR_CPUS__,
};
SEC("func_write=sys_write")
int func_write(void *ctx)
{
unsigned long long val;
char fmt[] = "sys_write: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
SEC("func_write_return=sys_write%return")
int func_write_return(void *ctx)
{
unsigned long long val = 0;
char fmt[] = "sys_write_return: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
char _license[] SEC("license") = "GPL";
int _version SEC("version") = LINUX_VERSION_CODE;
/************************* END ***************************/
Normal case:
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.013 MB perf.data (7 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17066 [000] d... 938449.863301: : sys_write: pmu=1157327
ls-17066 [000] dN.. 938449.863342: : sys_write_return: pmu=1225218
ls-17066 [000] d... 938449.863349: : sys_write: pmu=1241922
ls-17066 [000] dN.. 938449.863369: : sys_write_return: pmu=1267445
Normal case (system wide):
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -a
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.811 MB perf.data (120 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep -v '18446744073709551594' | grep -v perf | head -n 20
[SNIP]
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
gmain-30828 [002] d... 2740551.068992: : sys_write: pmu=84373
gmain-30828 [002] d... 2740551.068992: : sys_write_return: pmu=87696
gmain-30828 [002] d... 2740551.068996: : sys_write: pmu=100658
gmain-30828 [002] d... 2740551.068997: : sys_write_return: pmu=102572
Error case 1:
# perf record -e './test_bpf_map_2.c' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.014 MB perf.data ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17115 [007] d... 2724279.665625: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665651: : sys_write_return: pmu=18446744073709551614
ls-17115 [007] d... 2724279.665658: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665677: : sys_write_return: pmu=18446744073709551614
(18446744073709551614 is 0xfffffffffffffffe (-2))
Error case 2:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=evt/' -a
event syntax error: '..ps:pmu_map.event=evt/'
\___ Event not found for map setting
Hint: Valid config terms:
map:[<arraymap>].value=[value]
map:[<eventmap>].event=[event]
[SNIP]
Error case 3:
# ls /proc/2348/task/
2348 2505 2506 2507 2508
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -p 2348
ERROR: Apply config to BPF failed: Cannot set event to BPF map in multi-thread tracing
Error case 4:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
ERROR: Apply config to BPF failed: Doesn't support inherit event (Hint: use -i to turn off inherit)
Error case 5:
# perf record -i -e raw_syscalls:sys_enter -e './test_bpf_map_2.c/map:pmu_map.event=raw_syscalls:sys_enter/' ls
ERROR: Apply config to BPF failed: Can only put raw, hardware and BPF output event into a BPF map
Error case 6:
# perf record -i -e './test_bpf_map_2.c/map:pmu_map.event=123/' ls /
event syntax error: '.._map.event=123/'
\___ Incorrect value type for map
[SNIP]
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Cody P Schafer <dev@codyps.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jeremie Galarneau <jeremie.galarneau@efficios.com>
Cc: Kirill Smelkov <kirr@nexedi.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1456132275-98875-7-git-send-email-wangnan0@huawei.com
Signed-off-by: He Kuang <hekuang@huawei.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-02-22 17:10:33 +08:00
|
|
|
|
2016-06-23 22:26:15 +08:00
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
perf tools: Enable passing event to BPF object
A new syntax is added to the parser so that the user can access
predefined perf events in BPF objects.
After this patch, BPF programs for perf are finally able to utilize
bpf_perf_event_read() introduced in commit 35578d798400 ("bpf: Implement
function bpf_perf_event_read() that get the selected hardware PMU
counter").
Test result:
# cat test_bpf_map_2.c
/************************ BEGIN **************************/
#include <uapi/linux/bpf.h>
#define SEC(NAME) __attribute__((section(NAME), used))
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
};
static int (*trace_printk)(const char *fmt, int fmt_size, ...) =
(void *)BPF_FUNC_trace_printk;
static int (*get_smp_processor_id)(void) =
(void *)BPF_FUNC_get_smp_processor_id;
static int (*perf_event_read)(struct bpf_map_def *, int) =
(void *)BPF_FUNC_perf_event_read;
struct bpf_map_def SEC("maps") pmu_map = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = __NR_CPUS__,
};
SEC("func_write=sys_write")
int func_write(void *ctx)
{
unsigned long long val;
char fmt[] = "sys_write: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
SEC("func_write_return=sys_write%return")
int func_write_return(void *ctx)
{
unsigned long long val = 0;
char fmt[] = "sys_write_return: pmu=%llu\n";
val = perf_event_read(&pmu_map, get_smp_processor_id());
trace_printk(fmt, sizeof(fmt), val);
return 0;
}
char _license[] SEC("license") = "GPL";
int _version SEC("version") = LINUX_VERSION_CODE;
/************************* END ***************************/
Normal case:
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.013 MB perf.data (7 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17066 [000] d... 938449.863301: : sys_write: pmu=1157327
ls-17066 [000] dN.. 938449.863342: : sys_write_return: pmu=1225218
ls-17066 [000] d... 938449.863349: : sys_write: pmu=1241922
ls-17066 [000] dN.. 938449.863369: : sys_write_return: pmu=1267445
Normal case (system wide):
# echo "" > /sys/kernel/debug/tracing/trace
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -a
^C[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.811 MB perf.data (120 samples) ]
# cat /sys/kernel/debug/tracing/trace | grep -v '18446744073709551594' | grep -v perf | head -n 20
[SNIP]
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
gmain-30828 [002] d... 2740551.068992: : sys_write: pmu=84373
gmain-30828 [002] d... 2740551.068992: : sys_write_return: pmu=87696
gmain-30828 [002] d... 2740551.068996: : sys_write: pmu=100658
gmain-30828 [002] d... 2740551.068997: : sys_write_return: pmu=102572
Error case 1:
# perf record -e './test_bpf_map_2.c' ls /
[SNIP]
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.014 MB perf.data ]
# cat /sys/kernel/debug/tracing/trace | grep ls
ls-17115 [007] d... 2724279.665625: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665651: : sys_write_return: pmu=18446744073709551614
ls-17115 [007] d... 2724279.665658: : sys_write: pmu=18446744073709551614
ls-17115 [007] dN.. 2724279.665677: : sys_write_return: pmu=18446744073709551614
(18446744073709551614 is 0xfffffffffffffffe (-2))
Error case 2:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=evt/' -a
event syntax error: '..ps:pmu_map.event=evt/'
\___ Event not found for map setting
Hint: Valid config terms:
map:[<arraymap>].value=[value]
map:[<eventmap>].event=[event]
[SNIP]
Error case 3:
# ls /proc/2348/task/
2348 2505 2506 2507 2508
# perf record -i -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' -p 2348
ERROR: Apply config to BPF failed: Cannot set event to BPF map in multi-thread tracing
Error case 4:
# perf record -e cycles -e './test_bpf_map_2.c/map:pmu_map.event=cycles/' ls /
ERROR: Apply config to BPF failed: Doesn't support inherit event (Hint: use -i to turn off inherit)
Error case 5:
# perf record -i -e raw_syscalls:sys_enter -e './test_bpf_map_2.c/map:pmu_map.event=raw_syscalls:sys_enter/' ls
ERROR: Apply config to BPF failed: Can only put raw, hardware and BPF output event into a BPF map
Error case 6:
# perf record -i -e './test_bpf_map_2.c/map:pmu_map.event=123/' ls /
event syntax error: '.._map.event=123/'
\___ Incorrect value type for map
[SNIP]
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Cody P Schafer <dev@codyps.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jeremie Galarneau <jeremie.galarneau@efficios.com>
Cc: Kirill Smelkov <kirr@nexedi.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1456132275-98875-7-git-send-email-wangnan0@huawei.com
Signed-off-by: He Kuang <hekuang@huawei.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-02-22 17:10:33 +08:00
|
|
|
if (!evsel->name)
|
|
|
|
continue;
|
|
|
|
if (strcmp(str, evsel->name) == 0)
|
|
|
|
return evsel;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
2016-07-14 16:34:42 +08:00
|
|
|
|
2020-11-30 20:33:55 +08:00
|
|
|
void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state)
|
2016-07-14 16:34:42 +08:00
|
|
|
{
|
|
|
|
enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
|
|
|
|
enum action {
|
|
|
|
NONE,
|
|
|
|
PAUSE,
|
|
|
|
RESUME,
|
|
|
|
} action = NONE;
|
|
|
|
|
2017-12-05 00:51:07 +08:00
|
|
|
if (!evlist->overwrite_mmap)
|
2016-07-14 16:34:42 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
switch (old_state) {
|
|
|
|
case BKW_MMAP_NOTREADY: {
|
|
|
|
if (state != BKW_MMAP_RUNNING)
|
2018-01-11 23:50:20 +08:00
|
|
|
goto state_err;
|
2016-07-14 16:34:42 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case BKW_MMAP_RUNNING: {
|
|
|
|
if (state != BKW_MMAP_DATA_PENDING)
|
|
|
|
goto state_err;
|
|
|
|
action = PAUSE;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case BKW_MMAP_DATA_PENDING: {
|
|
|
|
if (state != BKW_MMAP_EMPTY)
|
|
|
|
goto state_err;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case BKW_MMAP_EMPTY: {
|
|
|
|
if (state != BKW_MMAP_RUNNING)
|
|
|
|
goto state_err;
|
|
|
|
action = RESUME;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
WARN_ONCE(1, "Shouldn't get there\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
evlist->bkw_mmap_state = state;
|
|
|
|
|
|
|
|
switch (action) {
|
|
|
|
case PAUSE:
|
2020-12-01 01:49:05 +08:00
|
|
|
evlist__pause(evlist);
|
2016-07-14 16:34:42 +08:00
|
|
|
break;
|
|
|
|
case RESUME:
|
2020-12-01 01:49:05 +08:00
|
|
|
evlist__resume(evlist);
|
2016-07-14 16:34:42 +08:00
|
|
|
break;
|
|
|
|
case NONE:
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
state_err:
|
|
|
|
return;
|
|
|
|
}
|
2017-11-14 22:01:06 +08:00
|
|
|
|
2020-12-01 02:07:49 +08:00
|
|
|
bool evlist__exclude_kernel(struct evlist *evlist)
|
2017-11-14 22:01:06 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *evsel;
|
2017-11-14 22:01:06 +08:00
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2019-07-21 19:24:29 +08:00
|
|
|
if (!evsel->core.attr.exclude_kernel)
|
2017-11-14 22:01:06 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
2018-05-21 22:57:44 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Events in data file are not collect in groups, but we still want
|
|
|
|
* the group display. Set the artificial group and set the leader's
|
|
|
|
* forced_leader flag to notify the display code.
|
|
|
|
*/
|
2020-12-01 01:58:32 +08:00
|
|
|
void evlist__force_leader(struct evlist *evlist)
|
2018-05-21 22:57:44 +08:00
|
|
|
{
|
2021-07-06 23:17:01 +08:00
|
|
|
if (!evlist->core.nr_groups) {
|
2019-09-03 16:39:52 +08:00
|
|
|
struct evsel *leader = evlist__first(evlist);
|
2018-05-21 22:57:44 +08:00
|
|
|
|
2020-11-30 20:22:07 +08:00
|
|
|
evlist__set_leader(evlist);
|
2018-05-21 22:57:44 +08:00
|
|
|
leader->forced_leader = true;
|
|
|
|
}
|
|
|
|
}
|
2018-10-02 03:59:26 +08:00
|
|
|
|
2020-12-01 01:58:32 +08:00
|
|
|
struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close)
|
2018-10-02 03:59:26 +08:00
|
|
|
{
|
2019-07-21 19:23:51 +08:00
|
|
|
struct evsel *c2, *leader;
|
2018-10-02 03:59:26 +08:00
|
|
|
bool is_open = true;
|
|
|
|
|
2021-07-06 23:17:00 +08:00
|
|
|
leader = evsel__leader(evsel);
|
|
|
|
|
2018-10-02 03:59:26 +08:00
|
|
|
pr_debug("Weak group for %s/%d failed\n",
|
2019-07-21 19:24:46 +08:00
|
|
|
leader->name, leader->core.nr_members);
|
2018-10-02 03:59:26 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* for_each_group_member doesn't work here because it doesn't
|
|
|
|
* include the first entry.
|
|
|
|
*/
|
|
|
|
evlist__for_each_entry(evsel_list, c2) {
|
|
|
|
if (c2 == evsel)
|
|
|
|
is_open = false;
|
2021-07-06 23:17:00 +08:00
|
|
|
if (evsel__has_leader(c2, leader)) {
|
2019-11-21 08:15:19 +08:00
|
|
|
if (is_open && close)
|
2019-10-12 02:21:40 +08:00
|
|
|
perf_evsel__close(&c2->core);
|
perf evlist: Keep topdown counters in weak group
On Intel Icelake, topdown events must always be grouped with a slots
event as leader. When a metric is parsed a weak group is formed and
retried if perf_event_open fails. The retried events aren't grouped
breaking the slots leader requirement. This change modifies the weak
group "reset" behavior so that topdown events aren't broken from the
group for the retry.
$ perf stat -e '{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring,branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,baclears.any,ARITH.DIVIDER_ACTIVE}:W' -a sleep 1
Performance counter stats for 'system wide':
47,867,188,483 slots (92.27%)
<not supported> topdown-bad-spec
<not supported> topdown-be-bound
<not supported> topdown-fe-bound
<not supported> topdown-retiring
2,173,346,937 branch-instructions (92.27%)
10,540,253 branch-misses # 0.48% of all branches (92.29%)
96,291,140 bus-cycles (92.29%)
6,214,202 cache-misses # 20.120 % of all cache refs (92.29%)
30,886,082 cache-references (76.91%)
11,773,726,641 cpu-cycles (84.62%)
11,807,585,307 instructions # 1.00 insn per cycle (92.31%)
0 mem-loads (92.32%)
2,212,928,573 mem-stores (84.69%)
10,024,403,118 ref-cycles (92.35%)
16,232,978 baclears.any (92.35%)
23,832,633 ARITH.DIVIDER_ACTIVE (84.59%)
0.981070734 seconds time elapsed
After:
$ perf stat -e '{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring,branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,baclears.any,ARITH.DIVIDER_ACTIVE}:W' -a sleep 1
Performance counter stats for 'system wide':
31040189283 slots (92.27%)
8997514811 topdown-bad-spec # 28.2% bad speculation (92.27%)
10997536028 topdown-be-bound # 34.5% backend bound (92.27%)
4778060526 topdown-fe-bound # 15.0% frontend bound (92.27%)
7086628768 topdown-retiring # 22.2% retiring (92.27%)
1417611942 branch-instructions (92.26%)
5285529 branch-misses # 0.37% of all branches (92.28%)
62922469 bus-cycles (92.29%)
1440708 cache-misses # 8.292 % of all cache refs (92.30%)
17374098 cache-references (76.94%)
8040889520 cpu-cycles (84.63%)
7709992319 instructions # 0.96 insn per cycle (92.32%)
0 mem-loads (92.32%)
1515669558 mem-stores (84.68%)
6542411177 ref-cycles (92.35%)
4154149 baclears.any (92.35%)
20556152 ARITH.DIVIDER_ACTIVE (84.59%)
1.010799593 seconds time elapsed
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Florian Fischer <florian.fischer@muhq.space>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.garry@huawei.com>
Cc: Kim Phillips <kim.phillips@amd.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Shunsuke Nakamura <nakamura.shun@fujitsu.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20220517052724.283874-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-05-17 13:27:23 +08:00
|
|
|
/*
|
|
|
|
* We want to close all members of the group and reopen
|
|
|
|
* them. Some events, like Intel topdown, require being
|
|
|
|
* in a group and so keep these in the group.
|
|
|
|
*/
|
perf stat: Always keep perf metrics topdown events in a group
If any member in a group has a different cpu mask than the other
members, the current perf stat disables group. when the perf metrics
topdown events are part of the group, the below <not supported> error
will be triggered.
$ perf stat -e "{slots,topdown-retiring,uncore_imc_free_running_0/dclk/}" -a sleep 1
WARNING: grouped events cpus do not match, disabling group:
anon group { slots, topdown-retiring, uncore_imc_free_running_0/dclk/ }
Performance counter stats for 'system wide':
141,465,174 slots
<not supported> topdown-retiring
1,605,330,334 uncore_imc_free_running_0/dclk/
The perf metrics topdown events must always be grouped with a slots
event as leader.
Factor out evsel__remove_from_group() to only remove the regular events
from the group.
Remove evsel__must_be_in_group(), since no one use it anymore.
With the patch, the topdown events aren't broken from the group for the
splitting.
$ perf stat -e "{slots,topdown-retiring,uncore_imc_free_running_0/dclk/}" -a sleep 1
WARNING: grouped events cpus do not match, disabling group:
anon group { slots, topdown-retiring, uncore_imc_free_running_0/dclk/ }
Performance counter stats for 'system wide':
346,110,588 slots
124,608,256 topdown-retiring
1,606,869,976 uncore_imc_free_running_0/dclk/
1.003877592 seconds time elapsed
Fixes: a9a1790247bdcf3b ("perf stat: Ensure group is defined on top of the same cpu mask")
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20220518143900.1493980-3-kan.liang@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-05-18 22:38:58 +08:00
|
|
|
evsel__remove_from_group(c2, leader);
|
perf evlist: Keep topdown counters in weak group
On Intel Icelake, topdown events must always be grouped with a slots
event as leader. When a metric is parsed a weak group is formed and
retried if perf_event_open fails. The retried events aren't grouped
breaking the slots leader requirement. This change modifies the weak
group "reset" behavior so that topdown events aren't broken from the
group for the retry.
$ perf stat -e '{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring,branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,baclears.any,ARITH.DIVIDER_ACTIVE}:W' -a sleep 1
Performance counter stats for 'system wide':
47,867,188,483 slots (92.27%)
<not supported> topdown-bad-spec
<not supported> topdown-be-bound
<not supported> topdown-fe-bound
<not supported> topdown-retiring
2,173,346,937 branch-instructions (92.27%)
10,540,253 branch-misses # 0.48% of all branches (92.29%)
96,291,140 bus-cycles (92.29%)
6,214,202 cache-misses # 20.120 % of all cache refs (92.29%)
30,886,082 cache-references (76.91%)
11,773,726,641 cpu-cycles (84.62%)
11,807,585,307 instructions # 1.00 insn per cycle (92.31%)
0 mem-loads (92.32%)
2,212,928,573 mem-stores (84.69%)
10,024,403,118 ref-cycles (92.35%)
16,232,978 baclears.any (92.35%)
23,832,633 ARITH.DIVIDER_ACTIVE (84.59%)
0.981070734 seconds time elapsed
After:
$ perf stat -e '{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring,branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,baclears.any,ARITH.DIVIDER_ACTIVE}:W' -a sleep 1
Performance counter stats for 'system wide':
31040189283 slots (92.27%)
8997514811 topdown-bad-spec # 28.2% bad speculation (92.27%)
10997536028 topdown-be-bound # 34.5% backend bound (92.27%)
4778060526 topdown-fe-bound # 15.0% frontend bound (92.27%)
7086628768 topdown-retiring # 22.2% retiring (92.27%)
1417611942 branch-instructions (92.26%)
5285529 branch-misses # 0.37% of all branches (92.28%)
62922469 bus-cycles (92.29%)
1440708 cache-misses # 8.292 % of all cache refs (92.30%)
17374098 cache-references (76.94%)
8040889520 cpu-cycles (84.63%)
7709992319 instructions # 0.96 insn per cycle (92.32%)
0 mem-loads (92.32%)
1515669558 mem-stores (84.68%)
6542411177 ref-cycles (92.35%)
4154149 baclears.any (92.35%)
20556152 ARITH.DIVIDER_ACTIVE (84.59%)
1.010799593 seconds time elapsed
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Florian Fischer <florian.fischer@muhq.space>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.garry@huawei.com>
Cc: Kim Phillips <kim.phillips@amd.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Shunsuke Nakamura <nakamura.shun@fujitsu.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20220517052724.283874-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-05-17 13:27:23 +08:00
|
|
|
|
2019-11-21 08:15:19 +08:00
|
|
|
/*
|
|
|
|
* Set this for all former members of the group
|
|
|
|
* to indicate they get reopened.
|
|
|
|
*/
|
|
|
|
c2->reset_group = true;
|
2018-10-02 03:59:26 +08:00
|
|
|
}
|
|
|
|
}
|
perf evlist: Keep topdown counters in weak group
On Intel Icelake, topdown events must always be grouped with a slots
event as leader. When a metric is parsed a weak group is formed and
retried if perf_event_open fails. The retried events aren't grouped
breaking the slots leader requirement. This change modifies the weak
group "reset" behavior so that topdown events aren't broken from the
group for the retry.
$ perf stat -e '{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring,branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,baclears.any,ARITH.DIVIDER_ACTIVE}:W' -a sleep 1
Performance counter stats for 'system wide':
47,867,188,483 slots (92.27%)
<not supported> topdown-bad-spec
<not supported> topdown-be-bound
<not supported> topdown-fe-bound
<not supported> topdown-retiring
2,173,346,937 branch-instructions (92.27%)
10,540,253 branch-misses # 0.48% of all branches (92.29%)
96,291,140 bus-cycles (92.29%)
6,214,202 cache-misses # 20.120 % of all cache refs (92.29%)
30,886,082 cache-references (76.91%)
11,773,726,641 cpu-cycles (84.62%)
11,807,585,307 instructions # 1.00 insn per cycle (92.31%)
0 mem-loads (92.32%)
2,212,928,573 mem-stores (84.69%)
10,024,403,118 ref-cycles (92.35%)
16,232,978 baclears.any (92.35%)
23,832,633 ARITH.DIVIDER_ACTIVE (84.59%)
0.981070734 seconds time elapsed
After:
$ perf stat -e '{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring,branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,baclears.any,ARITH.DIVIDER_ACTIVE}:W' -a sleep 1
Performance counter stats for 'system wide':
31040189283 slots (92.27%)
8997514811 topdown-bad-spec # 28.2% bad speculation (92.27%)
10997536028 topdown-be-bound # 34.5% backend bound (92.27%)
4778060526 topdown-fe-bound # 15.0% frontend bound (92.27%)
7086628768 topdown-retiring # 22.2% retiring (92.27%)
1417611942 branch-instructions (92.26%)
5285529 branch-misses # 0.37% of all branches (92.28%)
62922469 bus-cycles (92.29%)
1440708 cache-misses # 8.292 % of all cache refs (92.30%)
17374098 cache-references (76.94%)
8040889520 cpu-cycles (84.63%)
7709992319 instructions # 0.96 insn per cycle (92.32%)
0 mem-loads (92.32%)
1515669558 mem-stores (84.68%)
6542411177 ref-cycles (92.35%)
4154149 baclears.any (92.35%)
20556152 ARITH.DIVIDER_ACTIVE (84.59%)
1.010799593 seconds time elapsed
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Florian Fischer <florian.fischer@muhq.space>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.garry@huawei.com>
Cc: Kim Phillips <kim.phillips@amd.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Shunsuke Nakamura <nakamura.shun@fujitsu.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20220517052724.283874-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-05-17 13:27:23 +08:00
|
|
|
/* Reset the leader count if all entries were removed. */
|
|
|
|
if (leader->core.nr_members == 1)
|
|
|
|
leader->core.nr_members = 0;
|
2018-10-02 03:59:26 +08:00
|
|
|
return leader;
|
|
|
|
}
|
2020-07-17 15:01:33 +08:00
|
|
|
|
2020-09-02 18:57:07 +08:00
|
|
|
static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
|
|
|
|
{
|
|
|
|
char *s, *p;
|
|
|
|
int ret = 0, fd;
|
|
|
|
|
|
|
|
if (strncmp(str, "fifo:", 5))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
str += 5;
|
|
|
|
if (!*str || *str == ',')
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
s = strdup(str);
|
|
|
|
if (!s)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
p = strchr(s, ',');
|
|
|
|
if (p)
|
|
|
|
*p = '\0';
|
|
|
|
|
|
|
|
/*
|
|
|
|
* O_RDWR avoids POLLHUPs which is necessary to allow the other
|
|
|
|
* end of a FIFO to be repeatedly opened and closed.
|
|
|
|
*/
|
|
|
|
fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
|
|
|
|
if (fd < 0) {
|
|
|
|
pr_err("Failed to open '%s'\n", s);
|
|
|
|
ret = -errno;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
*ctl_fd = fd;
|
|
|
|
*ctl_fd_close = true;
|
|
|
|
|
|
|
|
if (p && *++p) {
|
|
|
|
/* O_RDWR | O_NONBLOCK means the other end need not be open */
|
|
|
|
fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
|
|
|
|
if (fd < 0) {
|
|
|
|
pr_err("Failed to open '%s'\n", p);
|
|
|
|
ret = -errno;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
*ctl_fd_ack = fd;
|
|
|
|
}
|
|
|
|
|
|
|
|
out_free:
|
|
|
|
free(s);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
|
2020-09-01 17:37:53 +08:00
|
|
|
{
|
|
|
|
char *comma = NULL, *endptr = NULL;
|
|
|
|
|
2020-09-02 18:57:07 +08:00
|
|
|
*ctl_fd_close = false;
|
|
|
|
|
2020-09-01 17:37:53 +08:00
|
|
|
if (strncmp(str, "fd:", 3))
|
2020-09-02 18:57:07 +08:00
|
|
|
return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);
|
2020-09-01 17:37:53 +08:00
|
|
|
|
|
|
|
*ctl_fd = strtoul(&str[3], &endptr, 0);
|
|
|
|
if (endptr == &str[3])
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
comma = strchr(str, ',');
|
|
|
|
if (comma) {
|
|
|
|
if (endptr != comma)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
*ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
|
|
|
|
if (endptr == comma + 1 || *endptr != '\0')
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-09-03 20:29:37 +08:00
|
|
|
void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
{
	/*
	 * Close control fds that evlist__parse_control() opened (it sets
	 * *ctl_fd_close in that case); clear the flag so a second call is
	 * a no-op. An ack fd of -1 means none was opened.
	 */
	if (!*ctl_fd_close)
		return;

	*ctl_fd_close = false;
	close(ctl_fd);
	if (ctl_fd_ack >= 0)
		close(ctl_fd_ack);
}
|
|
|
|
|
2020-07-17 15:01:33 +08:00
|
|
|
int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
|
|
|
|
{
|
|
|
|
if (fd == -1) {
|
|
|
|
pr_debug("Control descriptor is not initialized\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
|
2022-08-24 15:28:12 +08:00
|
|
|
fdarray_flag__nonfilterable |
|
|
|
|
fdarray_flag__non_perf_event);
|
2020-07-17 15:01:33 +08:00
|
|
|
if (evlist->ctl_fd.pos < 0) {
|
|
|
|
evlist->ctl_fd.pos = -1;
|
|
|
|
pr_err("Failed to add ctl fd entry: %m\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
evlist->ctl_fd.fd = fd;
|
|
|
|
evlist->ctl_fd.ack = ack;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool evlist__ctlfd_initialized(struct evlist *evlist)
|
|
|
|
{
|
|
|
|
return evlist->ctl_fd.pos >= 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int evlist__finalize_ctlfd(struct evlist *evlist)
|
|
|
|
{
|
|
|
|
struct pollfd *entries = evlist->core.pollfd.entries;
|
|
|
|
|
|
|
|
if (!evlist__ctlfd_initialized(evlist))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
entries[evlist->ctl_fd.pos].fd = -1;
|
|
|
|
entries[evlist->ctl_fd.pos].events = 0;
|
|
|
|
entries[evlist->ctl_fd.pos].revents = 0;
|
|
|
|
|
|
|
|
evlist->ctl_fd.pos = -1;
|
|
|
|
evlist->ctl_fd.ack = -1;
|
|
|
|
evlist->ctl_fd.fd = -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
|
|
|
|
char *cmd_data, size_t data_size)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
char c;
|
|
|
|
size_t bytes_read = 0;
|
|
|
|
|
2020-09-01 17:37:54 +08:00
|
|
|
*cmd = EVLIST_CTL_CMD_UNSUPPORTED;
|
2020-07-17 15:01:33 +08:00
|
|
|
memset(cmd_data, 0, data_size);
|
|
|
|
data_size--;
|
|
|
|
|
|
|
|
do {
|
|
|
|
err = read(evlist->ctl_fd.fd, &c, 1);
|
|
|
|
if (err > 0) {
|
|
|
|
if (c == '\n' || c == '\0')
|
|
|
|
break;
|
|
|
|
cmd_data[bytes_read++] = c;
|
|
|
|
if (bytes_read == data_size)
|
|
|
|
break;
|
2020-09-01 17:37:54 +08:00
|
|
|
continue;
|
|
|
|
} else if (err == -1) {
|
|
|
|
if (errno == EINTR)
|
|
|
|
continue;
|
|
|
|
if (errno == EAGAIN || errno == EWOULDBLOCK)
|
|
|
|
err = 0;
|
|
|
|
else
|
2020-07-17 15:01:33 +08:00
|
|
|
pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
|
|
|
|
}
|
2020-09-01 17:37:54 +08:00
|
|
|
break;
|
2020-07-17 15:01:33 +08:00
|
|
|
} while (1);
|
|
|
|
|
|
|
|
pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
|
|
|
|
bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");
|
|
|
|
|
2020-09-01 17:37:54 +08:00
|
|
|
if (bytes_read > 0) {
|
2020-07-17 15:01:33 +08:00
|
|
|
if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
|
|
|
|
(sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
|
|
|
|
*cmd = EVLIST_CTL_CMD_ENABLE;
|
|
|
|
} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
|
|
|
|
(sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
|
|
|
|
*cmd = EVLIST_CTL_CMD_DISABLE;
|
2020-09-01 17:37:57 +08:00
|
|
|
} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
|
|
|
|
(sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
|
|
|
|
*cmd = EVLIST_CTL_CMD_SNAPSHOT;
|
|
|
|
pr_debug("is snapshot\n");
|
perf tools: Add 'evlist' control command
Add a new 'evlist' control command to display all the evlist events.
When it is received, perf will scan and print current evlist into perf
record terminal.
The interface string for control file is:
evlist [-v|-g|-F]
The syntax follows perf evlist command:
-F Show just the sample frequency used for each event.
-v Show all fields.
-g Show event group information.
Example session:
terminal 1:
# mkfifo control ack
# perf record --control=fifo:control,ack -e '{cycles,instructions}'
terminal 2:
# echo evlist > control
terminal 1:
cycles
instructions
dummy:HG
terminal 2:
# echo 'evlist -v' > control
terminal 1:
cycles: size: 120, { sample_period, sample_freq }: 4000, sample_type: \
IP|TID|TIME|ID|CPU|PERIOD, read_format: ID, disabled: 1, inherit: 1, freq: 1, \
sample_id_all: 1, exclude_guest: 1
instructions: size: 120, config: 0x1, { sample_period, sample_freq }: 4000, \
sample_type: IP|TID|TIME|ID|CPU|PERIOD, read_format: ID, inherit: 1, freq: 1, \
sample_id_all: 1, exclude_guest: 1
dummy:HG: type: 1, size: 120, config: 0x9, { sample_period, sample_freq }: 4000, \
sample_type: IP|TID|TIME|ID|CPU|PERIOD, read_format: ID, inherit: 1, mmap: 1, \
comm: 1, freq: 1, task: 1, sample_id_all: 1, mmap2: 1, comm_exec: 1, ksymbol: 1, \
bpf_event: 1
terminal 2:
# echo 'evlist -g' > control
terminal 1:
{cycles,instructions}
dummy:HG
terminal 2:
# echo 'evlist -F' > control
terminal 1:
cycles: sample_freq=4000
instructions: sample_freq=4000
dummy:HG: sample_freq=4000
This new evlist command is handy to get real event names when
wildcards are used.
Adding evsel_fprintf.c object to python/perf.so build, because
it's now evlist.c dependency.
Adding PYTHON_PERF define for python/perf.so compilation, so we
can use it to compile in only evsel__fprintf from evsel_fprintf.c
object.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Budankov <abudankov@huawei.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lore.kernel.org/lkml/20201226232038.390883-3-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2020-12-27 07:20:36 +08:00
|
|
|
} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG,
|
|
|
|
(sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) {
|
|
|
|
*cmd = EVLIST_CTL_CMD_EVLIST;
|
2020-12-27 07:20:37 +08:00
|
|
|
} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG,
|
|
|
|
(sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) {
|
|
|
|
*cmd = EVLIST_CTL_CMD_STOP;
|
2020-12-27 07:20:38 +08:00
|
|
|
} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG,
|
|
|
|
(sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) {
|
|
|
|
*cmd = EVLIST_CTL_CMD_PING;
|
2020-07-17 15:01:33 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-01 17:37:54 +08:00
|
|
|
return bytes_read ? (int)bytes_read : err;
|
2020-07-17 15:01:33 +08:00
|
|
|
}
|
|
|
|
|
2020-09-01 17:37:57 +08:00
|
|
|
int evlist__ctlfd_ack(struct evlist *evlist)
|
2020-07-17 15:01:33 +08:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (evlist->ctl_fd.ack == -1)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
|
|
|
|
sizeof(EVLIST_CTL_CMD_ACK_TAG));
|
|
|
|
if (err == -1)
|
|
|
|
pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
perf tools: Allow to enable/disable events via control file
Adding new control events to enable/disable specific event.
The interface string for control file are:
'enable <EVENT NAME>'
'disable <EVENT NAME>'
when received the command, perf will scan the current evlist
for <EVENT NAME> and if found it's enabled/disabled.
Example session:
terminal 1:
# mkfifo control ack perf.pipe
# perf record --control=fifo:control,ack -D -1 --no-buffering -e 'sched:*' -o - > perf.pipe
terminal 2:
# cat perf.pipe | perf --no-pager script -i -
terminal 1:
Events disabled
NOTE Above message will show only after read side of the pipe ('>')
is started on 'terminal 2'. The 'terminal 1's bash does not execute
perf before that, hence the delyaed perf record message.
terminal 3:
# echo 'enable sched:sched_process_fork' > control
terminal 1:
event sched:sched_process_fork enabled
terminal 2:
bash 33349 [034] 149587.674295: sched:sched_process_fork: comm=bash pid=33349 child_comm=bash child_pid=34056
bash 33349 [034] 149588.239521: sched:sched_process_fork: comm=bash pid=33349 child_comm=bash child_pid=34057
terminal 3:
# echo 'enable sched:sched_wakeup_new' > control
terminal 1:
event sched:sched_wakeup_new enabled
terminal 2:
bash 33349 [034] 149632.228023: sched:sched_process_fork: comm=bash pid=33349 child_comm=bash child_pid=34059
bash 33349 [034] 149632.228050: sched:sched_wakeup_new: bash:34059 [120] success=1 CPU:036
bash 33349 [034] 149633.950005: sched:sched_process_fork: comm=bash pid=33349 child_comm=bash child_pid=34060
bash 33349 [034] 149633.950030: sched:sched_wakeup_new: bash:34060 [120] success=1 CPU:036
Committer testing:
If I use 'sched:*' and then enable all events, I can't get 'perf record'
to react to further commands, so I tested it with:
[root@five ~]# perf record --control=fifo:control,ack -D -1 --no-buffering -e 'sched:sched_process_*' -o - > perf.pipe
Events disabled
Events enabled
Events disabled
And then it works as expected, so we need to fix this pre-existing
problem.
Another issue, we need to check if a event is already enabled or
disabled and change the message to be clearer, i.e.:
[root@five ~]# perf record --control=fifo:control,ack -D -1 --no-buffering -e 'sched:sched_process_*' -o - > perf.pipe
Events disabled
If we receive a 'disable' command, then it should say:
[root@five ~]# perf record --control=fifo:control,ack -D -1 --no-buffering -e 'sched:sched_process_*' -o - > perf.pipe
Events disabled
Events already disabled
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Budankov <abudankov@huawei.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lore.kernel.org/lkml/20201226232038.390883-2-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2020-12-27 07:20:35 +08:00
|
|
|
/*
 * Locate the argument that follows a command tag of @cmd_size bytes in
 * @cmd_data.  Returns 1 and points *arg at the argument, 0 when the command
 * has no argument, or -1 when the byte after the tag is neither a space nor
 * the end of the string (malformed command).
 */
static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg)
{
	char *p = cmd_data + cmd_size;

	switch (*p) {
	case '\0':	/* command with no argument */
		return 0;
	case ' ':	/* argument starts right after the separator */
		*arg = p + 1;
		return 1;
	default:	/* junk glued to the command tag */
		return -1;
	}
}
|
|
|
|
|
|
|
|
static int evlist__ctlfd_enable(struct evlist *evlist, char *cmd_data, bool enable)
|
|
|
|
{
|
|
|
|
struct evsel *evsel;
|
|
|
|
char *name;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = get_cmd_arg(cmd_data,
|
|
|
|
enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 :
|
|
|
|
sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1,
|
|
|
|
&name);
|
|
|
|
if (err < 0) {
|
|
|
|
pr_info("failed: wrong command\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (err) {
|
|
|
|
evsel = evlist__find_evsel_by_str(evlist, name);
|
|
|
|
if (evsel) {
|
|
|
|
if (enable)
|
|
|
|
evlist__enable_evsel(evlist, name);
|
|
|
|
else
|
|
|
|
evlist__disable_evsel(evlist, name);
|
|
|
|
pr_info("Event %s %s\n", evsel->name,
|
|
|
|
enable ? "enabled" : "disabled");
|
|
|
|
} else {
|
|
|
|
pr_info("failed: can't find '%s' event\n", name);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (enable) {
|
|
|
|
evlist__enable(evlist);
|
|
|
|
pr_info(EVLIST_ENABLED_MSG);
|
|
|
|
} else {
|
|
|
|
evlist__disable(evlist);
|
|
|
|
pr_info(EVLIST_DISABLED_MSG);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
perf tools: Add 'evlist' control command
Add a new 'evlist' control command to display all the evlist events.
When it is received, perf will scan and print current evlist into perf
record terminal.
The interface string for control file is:
evlist [-v|-g|-F]
The syntax follows perf evlist command:
-F Show just the sample frequency used for each event.
-v Show all fields.
-g Show event group information.
Example session:
terminal 1:
# mkfifo control ack
# perf record --control=fifo:control,ack -e '{cycles,instructions}'
terminal 2:
# echo evlist > control
terminal 1:
cycles
instructions
dummy:HG
terminal 2:
# echo 'evlist -v' > control
terminal 1:
cycles: size: 120, { sample_period, sample_freq }: 4000, sample_type: \
IP|TID|TIME|ID|CPU|PERIOD, read_format: ID, disabled: 1, inherit: 1, freq: 1, \
sample_id_all: 1, exclude_guest: 1
instructions: size: 120, config: 0x1, { sample_period, sample_freq }: 4000, \
sample_type: IP|TID|TIME|ID|CPU|PERIOD, read_format: ID, inherit: 1, freq: 1, \
sample_id_all: 1, exclude_guest: 1
dummy:HG: type: 1, size: 120, config: 0x9, { sample_period, sample_freq }: 4000, \
sample_type: IP|TID|TIME|ID|CPU|PERIOD, read_format: ID, inherit: 1, mmap: 1, \
comm: 1, freq: 1, task: 1, sample_id_all: 1, mmap2: 1, comm_exec: 1, ksymbol: 1, \
bpf_event: 1
terminal 2:
# echo 'evlist -g' > control
terminal 1:
{cycles,instructions}
dummy:HG
terminal 2:
# echo 'evlist -F' > control
terminal 1:
cycles: sample_freq=4000
instructions: sample_freq=4000
dummy:HG: sample_freq=4000
This new evlist command is handy to get real event names when
wildcards are used.
Adding evsel_fprintf.c object to python/perf.so build, because
it's now evlist.c dependency.
Adding PYTHON_PERF define for python/perf.so compilation, so we
can use it to compile in only evsel__fprintf from evsel_fprintf.c
object.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Budankov <abudankov@huawei.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lore.kernel.org/lkml/20201226232038.390883-3-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2020-12-27 07:20:36 +08:00
|
|
|
static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data)
|
|
|
|
{
|
|
|
|
struct perf_attr_details details = { .verbose = false, };
|
|
|
|
struct evsel *evsel;
|
|
|
|
char *arg;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = get_cmd_arg(cmd_data,
|
|
|
|
sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1,
|
|
|
|
&arg);
|
|
|
|
if (err < 0) {
|
|
|
|
pr_info("failed: wrong command\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (err) {
|
|
|
|
if (!strcmp(arg, "-v")) {
|
|
|
|
details.verbose = true;
|
|
|
|
} else if (!strcmp(arg, "-g")) {
|
|
|
|
details.event_group = true;
|
|
|
|
} else if (!strcmp(arg, "-F")) {
|
|
|
|
details.freq = true;
|
|
|
|
} else {
|
|
|
|
pr_info("failed: wrong command\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, evsel)
|
|
|
|
evsel__fprintf(evsel, &details, stderr);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-07-17 15:01:33 +08:00
|
|
|
/*
 * Poll-loop hook: if the control fd has pending events, read one command,
 * dispatch it, and acknowledge it where the protocol requires.
 *
 * @cmd is an out-parameter so the caller (e.g. perf record) can also react to
 * commands such as SNAPSHOT/STOP that are not fully handled here.  Returns the
 * dispatch result: >0 bytes read with successful handling, 0 when nothing was
 * pending, <0 on handler failure.
 */
int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
{
	int err = 0;
	char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
	int ctlfd_pos = evlist->ctl_fd.pos;
	struct pollfd *entries = evlist->core.pollfd.entries;

	/* Fast path: no control fd, or nothing happened on it. */
	if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
		return 0;

	if (entries[ctlfd_pos].revents & POLLIN) {
		err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
					 EVLIST_CTL_CMD_MAX_LEN);
		if (err > 0) {
			switch (*cmd) {
			case EVLIST_CTL_CMD_ENABLE:
			case EVLIST_CTL_CMD_DISABLE:
				err = evlist__ctlfd_enable(evlist, cmd_data,
							   *cmd == EVLIST_CTL_CMD_ENABLE);
				break;
			case EVLIST_CTL_CMD_EVLIST:
				err = evlist__ctlfd_list(evlist, cmd_data);
				break;
			/* These are handled by the caller, not here. */
			case EVLIST_CTL_CMD_SNAPSHOT:
			case EVLIST_CTL_CMD_STOP:
			case EVLIST_CTL_CMD_PING:
				break;
			case EVLIST_CTL_CMD_ACK:
			case EVLIST_CTL_CMD_UNSUPPORTED:
			default:
				pr_debug("ctlfd: unsupported %d\n", *cmd);
				break;
			}
			/*
			 * Ack every recognized command except SNAPSHOT, which
			 * is acked by the caller once the snapshot is done.
			 */
			if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED ||
			      *cmd == EVLIST_CTL_CMD_SNAPSHOT))
				evlist__ctlfd_ack(evlist);
		}
	}

	/* Peer hung up or errored: tear the control fd down. */
	if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
		evlist__finalize_ctlfd(evlist);
	else
		entries[ctlfd_pos].revents = 0;

	return err;
}
|
2020-09-24 20:44:53 +08:00
|
|
|
|
perf record: Allow multiple recording time ranges
AUX area traces can produce too much data to record successfully or
analyze subsequently. Add another means to reduce data collection by
allowing multiple recording time ranges.
This is useful, for instance, in cases where a workload produces
predictably reproducible events in specific time ranges.
Today we only have perf record -D <msecs> to start at a specific region, or
some complicated approach using snapshot mode and external scripts sending
signals or using the fifos. But these approaches are difficult to set up
compared with simply having perf do it.
Extend perf record option -D/--delay option to specifying relative time
stamps for start stop controlled by perf with the right time offset, for
instance:
perf record -e intel_pt// -D 10-20,30-40
to record 10ms to 20ms into the trace and 30ms to 40ms.
Example:
The example workload is:
$ cat repeat-usleep.c
int usleep(useconds_t usec);
int usage(int ret, const char *msg)
{
if (msg)
fprintf(stderr, "%s\n", msg);
fprintf(stderr, "Usage is: repeat-usleep <microseconds>\n");
return ret;
}
int main(int argc, char *argv[])
{
unsigned long usecs;
char *end_ptr;
if (argc != 2)
return usage(1, "Error: Wrong number of arguments!");
errno = 0;
usecs = strtoul(argv[1], &end_ptr, 0);
if (errno || *end_ptr || usecs > UINT_MAX)
return usage(1, "Error: Invalid argument!");
while (1) {
int ret = usleep(usecs);
if (ret & errno != EINTR)
return usage(1, "Error: usleep() failed!");
}
return 0;
}
$ perf record -e intel_pt//u --delay 10-20,40-70,110-160 -- ./repeat-usleep 500
Events disabled
Events enabled
Events disabled
Events enabled
Events disabled
Events enabled
Events disabled
[ perf record: Woken up 5 times to write data ]
[ perf record: Captured and wrote 0.204 MB perf.data ]
Terminated
A dlfilter is used to determine continuous data collection (timestamps
less than 1ms apart):
$ cat dlfilter-show-delays.c
static __u64 start_time;
static __u64 last_time;
int start(void **data, void *ctx)
{
printf("%-17s\t%-9s\t%-6s\n", " Time", " Duration", " Delay");
return 0;
}
int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
__u64 delta;
if (!sample->time)
return 1;
if (!last_time)
goto out;
delta = sample->time - last_time;
if (delta < 1000000)
goto out2;;
printf("%17.9f\t%9.1f\t%6.1f\n", start_time / 1000000000.0, (last_time - start_time) / 1000000.0, delta / 1000000.0);
out:
start_time = sample->time;
out2:
last_time = sample->time;
return 1;
}
int stop(void *data, void *ctx)
{
printf("%17.9f\t%9.1f\n", start_time / 1000000000.0, (last_time - start_time) / 1000000.0);
return 0;
}
The result shows the times roughly match the --delay option:
$ perf script --itrace=qb --dlfilter dlfilter-show-delays.so
Time Duration Delay
39215.302317300 9.7 20.5
39215.332480217 30.4 40.9
39215.403837717 49.8
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20220824072814.16422-6-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-08-24 15:28:14 +08:00
|
|
|
/**
 * struct event_enable_time - perf record -D/--delay single time range.
 * @start: start of time range to enable events in milliseconds
 * @end: end of time range to enable events in milliseconds
 *
 * N.B. this structure is also accessed as an array of int (see
 * event_enable_timer__process()), so it must remain exactly two consecutive
 * ints with no padding or extra members.
 */
struct event_enable_time {
	int start;
	int end;
};
|
|
|
|
|
|
|
|
static int parse_event_enable_time(const char *str, struct event_enable_time *range, bool first)
|
|
|
|
{
|
|
|
|
const char *fmt = first ? "%u - %u %n" : " , %u - %u %n";
|
|
|
|
int ret, start, end, n;
|
|
|
|
|
|
|
|
ret = sscanf(str, fmt, &start, &end, &n);
|
|
|
|
if (ret != 2 || end <= start)
|
|
|
|
return -EINVAL;
|
|
|
|
if (range) {
|
|
|
|
range->start = start;
|
|
|
|
range->end = end;
|
|
|
|
}
|
|
|
|
return n;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Parse a comma-separated list of "start-end" ranges from @str.
 *
 * Called twice: first with @range == NULL just to count and validate the
 * ranges, then with an allocated array to fill it in.  @incr is 0 in the
 * counting pass so @range stays NULL throughout.
 *
 * Returns the number of ranges parsed, or a negative errno on malformed or
 * overlapping input.
 */
static ssize_t parse_event_enable_times(const char *str, struct event_enable_time *range)
{
	int incr = !!range;
	bool first = true;
	ssize_t ret, cnt;

	for (cnt = 0; *str; cnt++) {
		ret = parse_event_enable_time(str, range, first);
		if (ret < 0)
			return ret;
		/* Check no overlap: each range must start after the previous one ended. */
		if (!first && range && range->start <= range[-1].end)
			return -EINVAL;
		str += ret;
		range += incr;
		first = false;
	}
	return cnt;
}
|
|
|
|
|
|
|
|
/**
 * struct event_enable_timer - control structure for perf record -D/--delay.
 * @evlist: event list
 * @times: time ranges that events are enabled (N.B. this is also accessed as an
 *         array of int)
 * @times_cnt: number of time ranges
 * @timerfd: timer file descriptor
 * @pollfd_pos: position in @evlist array of file descriptors to poll (fdarray)
 * @times_step: current position in (int *)@times)[],
 *              refer event_enable_timer__process()
 *
 * Note, this structure is only used when there are time ranges, not when there
 * is only an initial delay.
 */
struct event_enable_timer {
	struct evlist *evlist;
	struct event_enable_time *times;
	size_t times_cnt;
	int timerfd;
	int pollfd_pos;
	/* Even values of times_step mean "enable next", odd mean "disable next". */
	size_t times_step;
};
|
|
|
|
|
|
|
|
/*
 * Parse the -D/--delay argument as a plain millisecond count.
 * Returns the delay, -1 meaning "wait for an enable command", or 0 when @str
 * is not a plain in-range number (the caller then tries to parse it as time
 * ranges instead).
 *
 * Fix: reset and check errno around strtol().  Without it, on platforms where
 * long == int, an out-of-range value saturates to LONG_MAX == INT_MAX with
 * errno == ERANGE and would wrongly pass the d > INT_MAX check.
 */
static int str_to_delay(const char *str)
{
	char *endptr;
	long d;

	errno = 0;
	d = strtol(str, &endptr, 10);
	if (errno || *endptr || d > INT_MAX || d < -1)
		return 0;
	return d;
}
|
|
|
|
|
|
|
|
/*
 * Parse the -D/--delay option value.  A plain number (or -1) becomes a simple
 * initial delay; otherwise the string is parsed as enable/disable time ranges
 * and an event_enable_timer is set up on a timerfd that is polled alongside
 * the evlist's other fds.  Returns 0 on success or a negative errno.
 */
int evlist__parse_event_enable_time(struct evlist *evlist, struct record_opts *opts,
				    const char *str, int unset)
{
	enum fdarray_flags flags = fdarray_flag__nonfilterable | fdarray_flag__non_perf_event;
	struct event_enable_timer *eet;
	ssize_t times_cnt;
	ssize_t ret;
	int err;

	if (unset)
		return 0;

	/* Simple numeric delay: no timer machinery needed. */
	opts->initial_delay = str_to_delay(str);
	if (opts->initial_delay)
		return 0;

	/* First pass: count and validate the ranges without storing them. */
	ret = parse_event_enable_times(str, NULL);
	if (ret < 0)
		return ret;

	times_cnt = ret;
	if (times_cnt == 0)
		return -EINVAL;

	eet = zalloc(sizeof(*eet));
	if (!eet)
		return -ENOMEM;

	eet->times = calloc(times_cnt, sizeof(*eet->times));
	if (!eet->times) {
		err = -ENOMEM;
		goto free_eet;
	}

	/* Second pass: fill the array; the count must not change. */
	if (parse_event_enable_times(str, eet->times) != times_cnt) {
		err = -EINVAL;
		goto free_eet_times;
	}

	eet->times_cnt = times_cnt;

	eet->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
	if (eet->timerfd == -1) {
		err = -errno;
		pr_err("timerfd_create failed: %s\n", strerror(errno));
		goto free_eet_times;
	}

	eet->pollfd_pos = perf_evlist__add_pollfd(&evlist->core, eet->timerfd, NULL, POLLIN, flags);
	if (eet->pollfd_pos < 0) {
		err = eet->pollfd_pos;
		goto close_timerfd;
	}

	eet->evlist = evlist;
	evlist->eet = eet;
	/* Events start enabled after the first range's start offset. */
	opts->initial_delay = eet->times[0].start;

	return 0;

close_timerfd:
	close(eet->timerfd);
free_eet_times:
	free(eet->times);
free_eet:
	free(eet);
	return err;
}
|
|
|
|
|
|
|
|
static int event_enable_timer__set_timer(struct event_enable_timer *eet, int ms)
|
|
|
|
{
|
|
|
|
struct itimerspec its = {
|
|
|
|
.it_value.tv_sec = ms / MSEC_PER_SEC,
|
|
|
|
.it_value.tv_nsec = (ms % MSEC_PER_SEC) * NSEC_PER_MSEC,
|
|
|
|
};
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
if (timerfd_settime(eet->timerfd, 0, &its, NULL) < 0) {
|
|
|
|
err = -errno;
|
|
|
|
pr_err("timerfd_settime failed: %s\n", strerror(errno));
|
|
|
|
}
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
int event_enable_timer__start(struct event_enable_timer *eet)
|
|
|
|
{
|
|
|
|
int ms;
|
|
|
|
|
|
|
|
if (!eet)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ms = eet->times[0].end - eet->times[0].start;
|
|
|
|
eet->times_step = 1;
|
|
|
|
|
|
|
|
return event_enable_timer__set_timer(eet, ms);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Advance the -D/--delay state machine when the timerfd fires.
 *
 * @eet->times is walked as a flat int array of alternating start/end values;
 * times_step indexes into it: even steps enable events (at a range start),
 * odd steps disable them (at a range end), and step/2 is the current range.
 *
 * Returns 1 when the last range has ended (caller should stop recording),
 * otherwise 0 or the error from re-arming the timer.
 */
int event_enable_timer__process(struct event_enable_timer *eet)
{
	struct pollfd *entries;
	short revents;

	if (!eet)
		return 0;

	entries = eet->evlist->core.pollfd.entries;
	revents = entries[eet->pollfd_pos].revents;
	entries[eet->pollfd_pos].revents = 0;

	if (revents & POLLIN) {
		size_t step = eet->times_step;
		size_t pos = step / 2;

		if (step & 1) {
			/* Odd step: a range just ended. */
			evlist__disable_non_dummy(eet->evlist);
			pr_info(EVLIST_DISABLED_MSG);
			if (pos >= eet->times_cnt - 1) {
				/* Disarm timer */
				event_enable_timer__set_timer(eet, 0);
				return 1; /* Stop */
			}
		} else {
			/* Even step: the next range just started. */
			evlist__enable_non_dummy(eet->evlist);
			pr_info(EVLIST_ENABLED_MSG);
		}

		step += 1;
		pos = step / 2;

		if (pos < eet->times_cnt) {
			int *times = (int *)eet->times; /* Accessing 'times' as array of int */
			int ms = times[step] - times[step - 1];

			eet->times_step = step;
			return event_enable_timer__set_timer(eet, ms);
		}
	}

	return 0;
}
|
|
|
|
|
|
|
|
void event_enable_timer__exit(struct event_enable_timer **ep)
|
|
|
|
{
|
|
|
|
if (!ep || !*ep)
|
|
|
|
return;
|
|
|
|
free((*ep)->times);
|
|
|
|
zfree(ep);
|
|
|
|
}
|
|
|
|
|
2020-09-24 20:44:53 +08:00
|
|
|
struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
|
|
|
|
{
|
|
|
|
struct evsel *evsel;
|
|
|
|
|
|
|
|
evlist__for_each_entry(evlist, evsel) {
|
2021-07-06 23:16:59 +08:00
|
|
|
if (evsel->core.idx == idx)
|
2020-09-24 20:44:53 +08:00
|
|
|
return evsel;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
2021-04-14 21:08:16 +08:00
|
|
|
|
|
|
|
/*
 * Format the non-dummy event names into @bf as a comma-separated list,
 * appending "..." when the buffer is too small for the next name.
 * Returns the number of characters written.
 *
 * NOTE(review): the space check below compares against the total @size, not
 * the remaining size - printed; scnprintf() still clamps so there is no
 * overflow, but the "..." cutoff may trigger later than intended - confirm.
 */
int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
{
	struct evsel *evsel;
	int printed = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_dummy_event(evsel))
			continue;
		/* +1 for NUL, +2 when a ',' separator is also needed. */
		if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
			printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
		} else {
			printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
			break;
		}
	}

	return printed;
}
|
2021-05-27 08:16:09 +08:00
|
|
|
|
|
|
|
/*
 * Break up 'mem-loads-aux'-led groups before reporting: every member becomes
 * its own leader so the auxiliary event does not appear as a real group head.
 */
void evlist__check_mem_load_aux(struct evlist *evlist)
{
	struct evsel *leader, *evsel, *pos;

	/*
	 * For some platforms, the 'mem-loads' event is required to use
	 * together with 'mem-loads-aux' within a group and 'mem-loads-aux'
	 * must be the group leader. Now we disable this group before reporting
	 * because 'mem-loads-aux' is just an auxiliary event. It doesn't carry
	 * any valid memory load information.
	 */
	evlist__for_each_entry(evlist, evsel) {
		leader = evsel__leader(evsel);
		/* Leaders themselves are skipped; members identify the group. */
		if (leader == evsel)
			continue;

		if (leader->name && strstr(leader->name, "mem-loads-aux")) {
			for_each_group_evsel(pos, leader) {
				/* Make each member its own, single-event leader. */
				evsel__set_leader(pos, pos);
				pos->core.nr_members = 0;
			}
		}
	}
}
|