Merge tag 'perf-core-for-mingo-5.3-20190611' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

perf record:

  Alexey Budankov:

  - Allow mixing --user-regs with --call-graph=dwarf, making sure that
    the minimal set of registers for DWARF unwinding is present in the
    set of user registers requested to be present in each sample, while
    warning the user that this may make callchains unreliable if more
    than the minimal set of registers is needed to unwind.

  yuzhoujian:

  - Add support to collect callchains from kernel or user space only,
    IOW allow setting the perf_event_attr.exclude_callchain_{kernel,user}
    bits from the command line.
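
    E.g., a usage sketch (the workload and the call-graph mode are
    illustrative):

      # perf record --call-graph dwarf --user-callchains -- sleep 1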

perf trace:

  Arnaldo Carvalho de Melo:

  - Remove x86_64 specific syscall numbers from the augmented_raw_syscalls
    BPF in-kernel collector of augmented raw_syscalls:sys_{enter,exit}
    payloads, using instead the syscall numbers obtained either from the
    arch specific syscalltbl generators or from audit-libs.

  - Allow 'perf trace' to ask for the number of bytes to collect for
    string arguments, for now asking for PATH_MAX, i.e. whole
    pathnames, which ends up being just a way to specify which syscall
    args are pathnames and thus should be read using bpf_probe_read_str().

  - Skip unknown syscalls when expanding strace-like syscall groups.
    This helps the 'string' group of syscalls to work on arm64, where
    some of the syscalls present in x86_64 that deal with strings, for
    instance 'access', are deprecated and thus should not be asked for
    when tracing.
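
    E.g., a sketch tracing the 'string' strace-like group for a workload
    (the workload is illustrative):

      # perf trace -e string -- touch /tmp/file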

  Leo Yan:

  - Exit when failing to build eBPF program.

perf config:

  Arnaldo Carvalho de Melo:

  - Bail out when a handler returns failure for a key-value pair. This
    helps with cases where processing a key-value pair is not just a
    matter of setting some tool specific knob, involving, for instance,
    building a BPF program to then attach to the list of events 'perf
    trace' will use, e.g. augmented_raw_syscalls.c.
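
    A sketch of such a key-value pair in ~/.perfconfig (the source file
    path is illustrative):

      [trace]
        add_events = tools/perf/examples/bpf/augmented_raw_syscalls.c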

perf.data:

  Kan Liang:

  - Read and store the die ID information made available in new Intel
    processors via CPUID.1F in the CPU topology written to the perf.data
    header.
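
    The recorded topology headers can then be inspected with:

      $ perf report --header-only -I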

perf stat:

  Kan Liang:

  - Support per-die aggregation.
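
    E.g., a sketch of a system-wide counting session (the event and the
    workload are illustrative):

      # perf stat --per-die -a -e cycles sleep 1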

Documentation:

  Arnaldo Carvalho de Melo:

  - Update perf.data documentation about the CPU_TOPOLOGY, MEM_TOPOLOGY,
    CLOCKID and DIR_FORMAT headers.

  Song Liu:

  - Add description of headers HEADER_BPF_PROG_INFO and HEADER_BPF_BTF.

  Leo Yan:

  - Update default value for llvm.clang-bpf-cmd-template in 'man perf-config'.

JVMTI:

  Jiri Olsa:

  - Address gcc string overflow warning for strncpy().

core:

  - Remove superfluous nthreads system_wide setup in perf_evsel__alloc_fd().

Intel PT:

  Adrian Hunter:

  - Add support for samples to contain the IPC ratio, collecting cycles
    information from CYC packets and showing the IPC info periodically.
    Because Intel PT does not update the cycle count on every branch or
    instruction, the incremental values will often be zero.  When there
    are values, they will be the number of instructions and number of
    cycles since the last update, and thus represent the average IPC
    since the last IPC value.

    E.g.:

    # perf record --cpu 1 -m200000 -a -e intel_pt/cyc/u sleep 0.0001
    rounding mmap pages size to 1024M (262144 pages)
    [ perf record: Woken up 0 times to write data ]
    [ perf record: Captured and wrote 2.208 MB perf.data ]
    # perf script --insn-trace --xed -F+ipc,-dso,-cpu,-tid
    #
    <SNIP + add line numbering to make sense of IPC counts e.g.: (18/3)>
    1   cc1 63501.650479626: 7f5219ac27bf _int_free+0x3f   jnz 0x7f5219ac2af0       IPC: 0.81 (36/44)
    2   cc1 63501.650479626: 7f5219ac27c5 _int_free+0x45   cmp $0x1f, %rbp
    3   cc1 63501.650479626: 7f5219ac27c9 _int_free+0x49   jbe 0x7f5219ac2b00
    4   cc1 63501.650479626: 7f5219ac27cf _int_free+0x4f   test $0x8, %al
    5   cc1 63501.650479626: 7f5219ac27d1 _int_free+0x51   jnz 0x7f5219ac2b00
    6   cc1 63501.650479626: 7f5219ac27d7 _int_free+0x57   movq  0x13c58a(%rip), %rcx
    7   cc1 63501.650479626: 7f5219ac27de _int_free+0x5e   mov %rdi, %r12
    8   cc1 63501.650479626: 7f5219ac27e1 _int_free+0x61   movq  %fs:(%rcx), %rax
    9   cc1 63501.650479626: 7f5219ac27e5 _int_free+0x65   test %rax, %rax
   10   cc1 63501.650479626: 7f5219ac27e8 _int_free+0x68   jz 0x7f5219ac2821
   11   cc1 63501.650479626: 7f5219ac27ea _int_free+0x6a   leaq  -0x11(%rbp), %rdi
   12   cc1 63501.650479626: 7f5219ac27ee _int_free+0x6e   mov %rdi, %rsi
   13   cc1 63501.650479626: 7f5219ac27f1 _int_free+0x71   shr $0x4, %rsi
   14   cc1 63501.650479626: 7f5219ac27f5 _int_free+0x75   cmpq  %rsi, 0x13caf4(%rip)
   15   cc1 63501.650479626: 7f5219ac27fc _int_free+0x7c   jbe 0x7f5219ac2821
   16   cc1 63501.650479626: 7f5219ac2821 _int_free+0xa1   cmpq  0x13f138(%rip), %rbp
   17   cc1 63501.650479626: 7f5219ac2828 _int_free+0xa8   jnbe 0x7f5219ac28d8
   18   cc1 63501.650479626: 7f5219ac28d8 _int_free+0x158  testb  $0x2, 0x8(%rbx)
   19   cc1 63501.650479628: 7f5219ac28dc _int_free+0x15c  jnz 0x7f5219ac2ab0       IPC: 6.00 (18/3)
    <SNIP>

  - Allow using time ranges with Intel PT, i.e. the following features,
    already present but not optimally usable with Intel PT, now are:

        Select the second 10% time slice:

        $ perf script --time 10%/2

        Select from 0% to 10% time slice:

        $ perf script --time 0%-10%

        Select the first and second 10% time slices:

        $ perf script --time 10%/1,10%/2

        Select from 0% to 10% and 30% to 40% slices:

        $ perf script --time 0%-10%,30%-40%

cs-etm (ARM):

  Mathieu Poirier:

  - Add support for CPU-wide trace scenarios.
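
    E.g., a sketch assuming a CoreSight-enabled arm64 system (sink
    selection, where needed, is system specific):

      # perf record -e cs_etm// -a -- sleep 1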

s390:

  Thomas Richter:

  - Fix missing kvm module load for s390.

  - Fix OOM error in TUI mode on s390.

  - Support s390 diag event display when doing analysis on !s390
    architectures.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 3ce5aceb5d by Ingo Molnar, 2019-06-17 20:48:14 +02:00
58 changed files with 3582 additions and 864 deletions

@ -0,0 +1,41 @@
Database Export
===============
perf tool's python scripting engine:
tools/perf/util/scripting-engines/trace-event-python.c
supports scripts:
tools/perf/scripts/python/export-to-sqlite.py
tools/perf/scripts/python/export-to-postgresql.py
which export data to a SQLite3 or PostgreSQL database.
The export process provides records with unique sequential ids which allows the
data to be imported directly to a database and provides the relationships
between tables.
Over time it is possible to continue to expand the export while maintaining
backward and forward compatibility, by following some simple rules:
1. Because of the nature of SQL, existing tables and columns can continue to be
used so long as the names and meanings (and to some extent data types) remain
the same.
2. New tables and columns can be added, without affecting existing SQL queries,
so long as the new names are unique.
3. Scripts that use a database (e.g. exported-sql-viewer.py) can maintain
backward compatibility by testing for the presence of new tables and columns
before using them. e.g. function IsSelectable() in exported-sql-viewer.py
4. The export scripts themselves maintain forward compatibility (i.e. an existing
script will continue to work with new versions of perf) by accepting a variable
number of arguments (e.g. def call_return_table(*x)) i.e. perf can pass more
arguments which old scripts will ignore.
5. The scripting engine tests for the existence of script handler functions
before calling them. The scripting engine can also test for the support of new
or optional features by checking for the existence and value of script global
variables.
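
A usage sketch (the database name and the optional 'branches' and
'calls' arguments are illustrative):

  $ perf record -g -- ls
  $ perf script -s tools/perf/scripts/python/export-to-sqlite.py example.db branches calls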

@ -103,6 +103,36 @@ The flags are "bcrosyiABEx" which stand for branch, call, return, conditional,
system, asynchronous, interrupt, transaction abort, trace begin, trace end, and
in transaction, respectively.
Another interesting field that is not printed by default is 'ipc' which can be
displayed as follows:
perf script --itrace=be -F+ipc
There are two ways that instructions-per-cycle (IPC) can be calculated depending
on the recording.
If the 'cyc' config term (see config terms section below) was used, then IPC is
calculated using the cycle count from CYC packets, otherwise MTC packets are
used - refer to the 'mtc' config term. When MTC is used, however, the values
are less accurate because the timing is less accurate.
Because Intel PT does not update the cycle count on every branch or instruction,
the values will often be zero. When there are values, they will be the number
of instructions and number of cycles since the last update, and thus represent
the average IPC since the last IPC for that event type. Note IPC for "branches"
events is calculated separately from IPC for "instructions" events.
Also note that the IPC instruction count may or may not include the current
instruction. If the cycle count is associated with an asynchronous branch
(e.g. page fault or interrupt), then the instruction count does not include the
current instruction, otherwise it does. That is consistent with whether or not
that instruction has retired when the cycle count is updated.
Another note, in the case of "branches" events, non-taken branches are not
presently sampled, so IPC values for them do not appear e.g. a CYC packet with a
TNT packet that starts with a non-taken branch. To see every possible IPC
value, "instructions" events can be used e.g. --itrace=i0ns
While it is possible to create scripts to analyze the data, an alternative
approach is available to export the data to a sqlite or postgresql database.
Refer to script export-to-sqlite.py or export-to-postgresql.py for more details,

@ -564,9 +564,12 @@ llvm.*::
llvm.clang-bpf-cmd-template::
Cmdline template. Below lines show its default value. Environment
variable is used to pass options.
"$CLANG_EXEC -D__KERNEL__ $CLANG_OPTIONS $KERNEL_INC_OPTIONS \
-Wno-unused-value -Wno-pointer-sign -working-directory \
$WORKING_DIR -c $CLANG_SOURCE -target bpf -O2 -o -"
"$CLANG_EXEC -D__KERNEL__ -D__NR_CPUS__=$NR_CPUS "\
"-DLINUX_VERSION_CODE=$LINUX_VERSION_CODE " \
"$CLANG_OPTIONS $PERF_BPF_INC_OPTIONS $KERNEL_INC_OPTIONS " \
"-Wno-unused-value -Wno-pointer-sign " \
"-working-directory $WORKING_DIR " \
"-c \"$CLANG_SOURCE\" -target bpf $CLANG_EMIT_LLVM -O2 -o - $LLVM_OPTIONS_PIPE"
llvm.clang-opt::
Options passed to clang.
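
The effective value can be inspected with 'perf config', e.g.:

  $ perf config llvm.clang-bpf-cmd-template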

@ -142,12 +142,14 @@ OPTIONS
perf diff --time 0%-10%,30%-40%
It also supports analyzing samples within a given time window
<start>,<stop>. Times have the format seconds.microseconds. If 'start'
is not given (i.e., time string is ',x.y') then analysis starts at
the beginning of the file. If stop time is not given (i.e, time
string is 'x.y,') then analysis goes to the end of the file. Time string is
'a1.b1,c1.d1:a2.b2,c2.d2'. Use ':' to separate timestamps for different
perf.data files.
<start>,<stop>. Times have the format seconds.nanoseconds. If 'start'
is not given (i.e. time string is ',x.y') then analysis starts at
the beginning of the file. If stop time is not given (i.e. time
string is 'x.y,') then analysis goes to the end of the file.
Multiple ranges can be separated by spaces, which requires the argument
to be quoted e.g. --time "1234.567,1234.789 1235,"
Time string is 'a1.b1,c1.d1:a2.b2,c2.d2'. Use ':' to separate timestamps
for different perf.data files.
For example, we get the timestamp information from 'perf script'.

@ -490,6 +490,17 @@ Configure all used events to run in kernel space.
--all-user::
Configure all used events to run in user space.
--kernel-callchains::
Collect callchains only from kernel space. I.e. this option sets
perf_event_attr.exclude_callchain_user to 1.
--user-callchains::
Collect callchains only from user space. I.e. this option sets
perf_event_attr.exclude_callchain_kernel to 1.
Don't use both --kernel-callchains and --user-callchains at the same time or no
callchains will be collected.
--timestamp-filename
Append timestamp to output file name.

@ -412,12 +412,13 @@ OPTIONS
--time::
Only analyze samples within given time window: <start>,<stop>. Times
have the format seconds.microseconds. If start is not given (i.e., time
have the format seconds.nanoseconds. If start is not given (i.e. time
string is ',x.y') then analysis starts at the beginning of the file. If
stop time is not given (i.e, time string is 'x.y,') then analysis goes
to end of file.
stop time is not given (i.e. time string is 'x.y,') then analysis goes
to end of file. Multiple ranges can be separated by spaces, which
requires the argument to be quoted e.g. --time "1234.567,1234.789 1235,"
Also support time percent with multiple time range. Time string is
Also support time percent with multiple time ranges. Time string is
'a%/n,b%/m,...' or 'a%-b%,c%-%d,...'.
For example:

@ -117,7 +117,7 @@ OPTIONS
Comma separated list of fields to print. Options are:
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output, brstackinsn,
brstackoff, callindent, insn, insnlen, synth, phys_addr, metric, misc, srccode.
brstackoff, callindent, insn, insnlen, synth, phys_addr, metric, misc, srccode, ipc.
Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
@ -203,6 +203,9 @@ OPTIONS
The synth field is used by synthesized events which may be created when
Instruction Trace decoding.
The ipc (instructions per cycle) field is synthesized and may have a value when
Instruction Trace decoding.
Finally, a user may not set fields to none for all event types.
i.e., -F "" is not allowed.
@ -358,12 +361,13 @@ include::itrace.txt[]
--time::
Only analyze samples within given time window: <start>,<stop>. Times
have the format seconds.microseconds. If start is not given (i.e., time
have the format seconds.nanoseconds. If start is not given (i.e. time
string is ',x.y') then analysis starts at the beginning of the file. If
stop time is not given (i.e, time string is 'x.y,') then analysis goes
to end of file.
stop time is not given (i.e. time string is 'x.y,') then analysis goes
to end of file. Multiple ranges can be separated by spaces, which
requires the argument to be quoted e.g. --time "1234.567,1234.789 1235,"
Also support time percent with multipe time range. Time string is
Also support time percent with multiple time ranges. Time string is
'a%/n,b%/m,...' or 'a%-b%,c%-%d,...'.
For example:

@ -200,6 +200,13 @@ use --per-socket in addition to -a. (system-wide). The output includes the
socket number and the number of online processors on that socket. This is
useful to gauge the amount of aggregation.
--per-die::
Aggregate counts per processor die for system-wide mode measurements. This
is a useful mode to detect imbalance between dies. To enable this mode,
use --per-die in addition to -a. (system-wide). The output includes the
die number and the number of online processors on that die. This is
useful to gauge the amount of aggregation.
--per-core::
Aggregate counts per physical processor for system-wide mode measurements. This
is a useful mode to detect imbalance between physical cores. To enable this mode,
@ -239,6 +246,9 @@ Input file name.
--per-socket::
Aggregate counts per processor socket for system-wide mode measurements.
--per-die::
Aggregate counts per processor die for system-wide mode measurements.
--per-core::
Aggregate counts per physical processor for system-wide mode measurements.

@ -151,25 +151,45 @@ struct {
HEADER_CPU_TOPOLOGY = 13,
String lists defining the core and CPU threads topology.
The string lists are followed by a variable length array
which contains core_id and socket_id of each cpu.
The number of entries can be determined by the size of the
section minus the sizes of both string lists.
struct {
/*
* First revision of HEADER_CPU_TOPOLOGY
*
* See 'struct perf_header_string_list' definition earlier
* in this file.
*/
struct perf_header_string_list cores; /* Variable length */
struct perf_header_string_list threads; /* Variable length */
/*
* Second revision of HEADER_CPU_TOPOLOGY, older tools
* will not consider what comes next
*/
struct {
uint32_t core_id;
uint32_t socket_id;
} cpus[nr]; /* Variable length records */
/* 'nr' comes from previously processed HEADER_NRCPUS's nr_cpu_avail */
/*
* Third revision of HEADER_CPU_TOPOLOGY, older tools
* will not consider what comes next
*/
struct perf_header_string_list dies; /* Variable length */
uint32_t die_id[nr_cpus_avail]; /* from previously processed HEADER_NR_CPUS, VLA */
};
Example:
sibling cores : 0-3
sibling sockets : 0-8
sibling dies : 0-3
sibling dies : 4-7
sibling threads : 0-1
sibling threads : 2-3
sibling threads : 4-5
sibling threads : 6-7
HEADER_NUMA_TOPOLOGY = 14,
@ -272,6 +292,69 @@ struct {
Two uint64_t for the time of first sample and the time of last sample.
HEADER_SAMPLE_TOPOLOGY = 22,
Physical memory map and its node assignments.
The format of data in MEM_TOPOLOGY is as follows:
0 - version | for future changes
8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
16 - count | number of nodes
For each node we store map of physical indexes:
32 - node id | node index
40 - size | size of bitmap
48 - bitmap | bitmap of memory indexes that belongs to node
| /sys/devices/system/node/node<NODE>/memory<INDEX>
The MEM_TOPOLOGY can be displayed with following command:
$ perf report --header-only -I
...
# memory nodes (nr 1, block size 0x8000000):
# 0 [7G]: 0-23,32-69
HEADER_CLOCKID = 23,
One uint64_t for the clockid frequency, specified, for instance, via 'perf
record -k' (see clock_gettime()), to enable conversion of timestamp derived
metrics into wall clock time at the reporting stage.
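
E.g., a sketch selecting the monotonic clock while recording (the
clockid choice is illustrative):

  $ perf record -k monotonic -- sleep 1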
HEADER_DIR_FORMAT = 24,
The data files layout is described by HEADER_DIR_FORMAT feature. Currently it
holds only version number (1):
uint64_t version;
The current version holds only the version value (1), meaning that data files:
- Follow the 'data.*' name format.
- Contain raw events data in standard perf format as read from kernel (and need
to be sorted)
Future versions are expected to describe different data files layout according
to special needs.
HEADER_BPF_PROG_INFO = 25,
struct bpf_prog_info_linear, which contains detailed information about
a BPF program, including type, id, tag, jited/xlated instructions, etc.
HEADER_BPF_BTF = 26,
Contains BPF Type Format (BTF). For more information about BTF, please
refer to Documentation/bpf/btf.rst.
struct {
u32 id;
u32 data_size;
char data[];
};
HEADER_COMPRESSED = 27,
struct {

@ -413,6 +413,9 @@ ifdef CORESIGHT
$(call feature_check,libopencsd)
ifeq ($(feature-libopencsd), 1)
CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS)
ifeq ($(feature-reallocarray), 0)
CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
endif
LDFLAGS += $(LIBOPENCSD_LDFLAGS)
EXTLIBS += $(OPENCSDLIBS)
$(call detected,CONFIG_LIBOPENCSD)

@ -31,12 +31,159 @@ struct cs_etm_recording {
struct auxtrace_record itr;
struct perf_pmu *cs_etm_pmu;
struct perf_evlist *evlist;
int wrapped_cnt;
bool *wrapped;
bool snapshot_mode;
size_t snapshot_size;
};
static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
[CS_ETM_ETMCCER] = "mgmt/etmccer",
[CS_ETM_ETMIDR] = "mgmt/etmidr",
};
static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
[CS_ETMV4_TRCIDR0] = "trcidr/trcidr0",
[CS_ETMV4_TRCIDR1] = "trcidr/trcidr1",
[CS_ETMV4_TRCIDR2] = "trcidr/trcidr2",
[CS_ETMV4_TRCIDR8] = "trcidr/trcidr8",
[CS_ETMV4_TRCAUTHSTATUS] = "mgmt/trcauthstatus",
};
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
static int cs_etm_set_context_id(struct auxtrace_record *itr,
struct perf_evsel *evsel, int cpu)
{
struct cs_etm_recording *ptr;
struct perf_pmu *cs_etm_pmu;
char path[PATH_MAX];
int err = -EINVAL;
u32 val;
ptr = container_of(itr, struct cs_etm_recording, itr);
cs_etm_pmu = ptr->cs_etm_pmu;
if (!cs_etm_is_etmv4(itr, cpu))
goto out;
/* Get a handle on TRCIRD2 */
snprintf(path, PATH_MAX, "cpu%d/%s",
cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
/* There was a problem reading the file, bailing out */
if (err != 1) {
pr_err("%s: can't read file %s\n",
CORESIGHT_ETM_PMU_NAME, path);
goto out;
}
/*
* TRCIDR2.CIDSIZE, bit [9-5], indicates whether contextID tracing
* is supported:
* 0b00000 Context ID tracing is not supported.
* 0b00100 Maximum of 32-bit Context ID size.
* All other values are reserved.
*/
val = BMVAL(val, 5, 9);
if (!val || val != 0x4) {
err = -EINVAL;
goto out;
}
/* All good, let the kernel know */
evsel->attr.config |= (1 << ETM_OPT_CTXTID);
err = 0;
out:
return err;
}
static int cs_etm_set_timestamp(struct auxtrace_record *itr,
struct perf_evsel *evsel, int cpu)
{
struct cs_etm_recording *ptr;
struct perf_pmu *cs_etm_pmu;
char path[PATH_MAX];
int err = -EINVAL;
u32 val;
ptr = container_of(itr, struct cs_etm_recording, itr);
cs_etm_pmu = ptr->cs_etm_pmu;
if (!cs_etm_is_etmv4(itr, cpu))
goto out;
/* Get a handle on TRCIRD0 */
snprintf(path, PATH_MAX, "cpu%d/%s",
cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
/* There was a problem reading the file, bailing out */
if (err != 1) {
pr_err("%s: can't read file %s\n",
CORESIGHT_ETM_PMU_NAME, path);
goto out;
}
/*
* TRCIDR0.TSSIZE, bit [28-24], indicates whether global timestamping
* is supported:
* 0b00000 Global timestamping is not implemented
* 0b00110 Implementation supports a maximum timestamp of 48bits.
* 0b01000 Implementation supports a maximum timestamp of 64bits.
*/
val &= GENMASK(28, 24);
if (!val) {
err = -EINVAL;
goto out;
}
/* All good, let the kernel know */
evsel->attr.config |= (1 << ETM_OPT_TS);
err = 0;
out:
return err;
}
static int cs_etm_set_option(struct auxtrace_record *itr,
struct perf_evsel *evsel, u32 option)
{
int i, err = -EINVAL;
struct cpu_map *event_cpus = evsel->evlist->cpus;
struct cpu_map *online_cpus = cpu_map__new(NULL);
/* Set option of each CPU we have */
for (i = 0; i < cpu__max_cpu(); i++) {
if (!cpu_map__has(event_cpus, i) ||
!cpu_map__has(online_cpus, i))
continue;
switch (option) {
case ETM_OPT_CTXTID:
err = cs_etm_set_context_id(itr, evsel, i);
if (err)
goto out;
break;
case ETM_OPT_TS:
err = cs_etm_set_timestamp(itr, evsel, i);
if (err)
goto out;
break;
default:
goto out;
}
}
err = 0;
out:
cpu_map__put(online_cpus);
return err;
}
static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
struct record_opts *opts,
const char *str)
@ -105,12 +252,16 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
struct perf_evsel *evsel, *cs_etm_evsel = NULL;
const struct cpu_map *cpus = evlist->cpus;
struct cpu_map *cpus = evlist->cpus;
bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);
int err = 0;
ptr->evlist = evlist;
ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
if (perf_can_record_switch_events())
opts->record_switch_events = true;
evlist__for_each_entry(evlist, evsel) {
if (evsel->attr.type == cs_etm_pmu->type) {
if (cs_etm_evsel) {
@ -241,19 +392,28 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
/*
* In the case of per-cpu mmaps, we need the CPU on the
* AUX event.
* AUX event. We also need the contextID in order to be notified
* when a context switch happened.
*/
if (!cpu_map__empty(cpus))
if (!cpu_map__empty(cpus)) {
perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
err = cs_etm_set_option(itr, cs_etm_evsel, ETM_OPT_CTXTID);
if (err)
goto out;
err = cs_etm_set_option(itr, cs_etm_evsel, ETM_OPT_TS);
if (err)
goto out;
}
/* Add dummy event to keep tracking */
if (opts->full_auxtrace) {
struct perf_evsel *tracking_evsel;
int err;
err = parse_events(evlist, "dummy:u", NULL);
if (err)
return err;
goto out;
tracking_evsel = perf_evlist__last(evlist);
perf_evlist__set_tracking_event(evlist, tracking_evsel);
@ -266,7 +426,8 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
perf_evsel__set_sample_bit(tracking_evsel, TIME);
}
return 0;
out:
return err;
}
static u64 cs_etm_get_config(struct auxtrace_record *itr)
@ -314,6 +475,8 @@ static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
config_opts = cs_etm_get_config(itr);
if (config_opts & BIT(ETM_OPT_CYCACC))
config |= BIT(ETM4_CFG_BIT_CYCACC);
if (config_opts & BIT(ETM_OPT_CTXTID))
config |= BIT(ETM4_CFG_BIT_CTXTID);
if (config_opts & BIT(ETM_OPT_TS))
config |= BIT(ETM4_CFG_BIT_TS);
if (config_opts & BIT(ETM_OPT_RETSTK))
@ -363,19 +526,6 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
(etmv3 * CS_ETMV3_PRIV_SIZE));
}
static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
[CS_ETM_ETMCCER] = "mgmt/etmccer",
[CS_ETM_ETMIDR] = "mgmt/etmidr",
};
static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
[CS_ETMV4_TRCIDR0] = "trcidr/trcidr0",
[CS_ETMV4_TRCIDR1] = "trcidr/trcidr1",
[CS_ETMV4_TRCIDR2] = "trcidr/trcidr2",
[CS_ETMV4_TRCIDR8] = "trcidr/trcidr8",
[CS_ETMV4_TRCAUTHSTATUS] = "mgmt/trcauthstatus",
};
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
{
bool ret = false;
@ -536,16 +686,131 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
return 0;
}
static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
{
bool *wrapped;
int cnt = ptr->wrapped_cnt;
/* Make @ptr->wrapped as big as @idx */
while (cnt <= idx)
cnt++;
/*
* Free'ed in cs_etm_recording_free(). Using realloc() to avoid
* cross compilation problems where the host's system supports
* reallocarray() but not the target.
*/
wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
if (!wrapped)
return -ENOMEM;
wrapped[cnt - 1] = false;
ptr->wrapped_cnt = cnt;
ptr->wrapped = wrapped;
return 0;
}
static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
size_t buffer_size, u64 head)
{
u64 i, watermark;
u64 *buf = (u64 *)buffer;
size_t buf_size = buffer_size;
/*
* We want to look the very last 512 byte (chosen arbitrarily) in
* the ring buffer.
*/
watermark = buf_size - 512;
/*
* @head is continuously increasing - if its value is equal or greater
* than the size of the ring buffer, it has wrapped around.
*/
if (head >= buffer_size)
return true;
/*
* The value of @head is somewhere within the size of the ring buffer.
* This can be that there hasn't been enough data to fill the ring
* buffer yet or the trace time was so long that @head has numerically
* wrapped around. To find we need to check if we have data at the very
* end of the ring buffer. We can reliably do this because mmap'ed
* pages are zeroed out and there is a fresh mapping with every new
* session.
*/
/* @head is less than 512 byte from the end of the ring buffer */
if (head > watermark)
watermark = head;
/*
* Speed things up by using 64 bit transactions (see "u64 *buf" above)
*/
watermark >>= 3;
buf_size >>= 3;
/*
* If we find trace data at the end of the ring buffer, @head has
* been there and has numerically wrapped around at least once.
*/
for (i = watermark; i < buf_size; i++)
if (buf[i])
return true;
return false;
}
static int cs_etm_find_snapshot(struct auxtrace_record *itr,
int idx, struct auxtrace_mmap *mm,
unsigned char *data __maybe_unused,
unsigned char *data,
u64 *head, u64 *old)
{
int err;
bool wrapped;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
/*
* Allocate memory to keep track of wrapping if this is the first
* time we deal with this *mm.
*/
if (idx >= ptr->wrapped_cnt) {
err = cs_etm_alloc_wrapped_array(ptr, idx);
if (err)
return err;
}
/*
* Check to see if *head has wrapped around. If it hasn't only the
* amount of data between *head and *old is snapshot'ed to avoid
bloating the perf.data file with zeros. But as soon as *head has
wrapped around, the entire size of the AUX ring buffer is taken.
*/
wrapped = ptr->wrapped[idx];
if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
wrapped = true;
ptr->wrapped[idx] = true;
}
pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
__func__, idx, (size_t)*old, (size_t)*head, mm->len);
*old = *head;
*head += mm->len;
/* No wrap has occurred, we can just use *head and *old. */
if (!wrapped)
return 0;
/*
* *head has wrapped around - adjust *head and *old to pickup the
* entire content of the AUX buffer.
*/
if (*head >= mm->len) {
*old = *head - mm->len;
} else {
*head += mm->len;
*old = *head - mm->len;
}
return 0;
}
@ -586,6 +851,8 @@ static void cs_etm_recording_free(struct auxtrace_record *itr)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
zfree(&ptr->wrapped);
free(ptr);
}

@ -2191,6 +2191,10 @@ static struct option __record_options[] = {
OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
"Configure all used events to run in user space.",
PARSE_OPT_EXCLUSIVE),
OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
"collect kernel callchains"),
OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
"collect user callchains"),
OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
"clang binary to use for compiling BPF scriptlets"),
OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",

@ -1428,6 +1428,10 @@ repeat:
&report.range_num);
if (ret < 0)
goto error;
itrace_synth_opts__set_time_range(&itrace_synth_opts,
report.ptime_range,
report.range_num);
}
if (session->tevent.pevent &&
@ -1449,8 +1453,10 @@ repeat:
ret = 0;
error:
if (report.ptime_range)
if (report.ptime_range) {
itrace_synth_opts__clear_time_range(&itrace_synth_opts);
zfree(&report.ptime_range);
}
zstd_fini(&(session->zstd_data));
perf_session__delete(session);
return ret;

@ -102,6 +102,7 @@ enum perf_output_field {
PERF_OUTPUT_METRIC = 1U << 28,
PERF_OUTPUT_MISC = 1U << 29,
PERF_OUTPUT_SRCCODE = 1U << 30,
PERF_OUTPUT_IPC = 1U << 31,
};
struct output_option {
@ -139,6 +140,7 @@ struct output_option {
{.str = "metric", .field = PERF_OUTPUT_METRIC},
{.str = "misc", .field = PERF_OUTPUT_MISC},
{.str = "srccode", .field = PERF_OUTPUT_SRCCODE},
{.str = "ipc", .field = PERF_OUTPUT_IPC},
};
enum {
@ -1268,6 +1270,20 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
return printed;
}
static int perf_sample__fprintf_ipc(struct perf_sample *sample,
struct perf_event_attr *attr, FILE *fp)
{
unsigned int ipc;
if (!PRINT_FIELD(IPC) || !sample->cyc_cnt || !sample->insn_cnt)
return 0;
ipc = (sample->insn_cnt * 100) / sample->cyc_cnt;
return fprintf(fp, " \t IPC: %u.%02u (%" PRIu64 "/%" PRIu64 ") ",
ipc / 100, ipc % 100, sample->insn_cnt, sample->cyc_cnt);
}
static int perf_sample__fprintf_bts(struct perf_sample *sample,
struct perf_evsel *evsel,
struct thread *thread,
@ -1312,6 +1328,8 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
printed += perf_sample__fprintf_addr(sample, thread, attr, fp);
}
printed += perf_sample__fprintf_ipc(sample, attr, fp);
if (print_srcline_last)
printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
@ -1859,6 +1877,9 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(PHYS_ADDR))
fprintf(fp, "%16" PRIx64, sample->phys_addr);
perf_sample__fprintf_ipc(sample, attr, fp);
fprintf(fp, "\n");
if (PRINT_FIELD(SRCCODE)) {
@ -3433,7 +3454,7 @@ int cmd_script(int argc, const char **argv)
"Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
"addr,symoff,srcline,period,iregs,uregs,brstack,"
"brstacksym,flags,bpf-output,brstackinsn,brstackoff,"
"callindent,insn,insnlen,synth,phys_addr,metric,misc",
"callindent,insn,insnlen,synth,phys_addr,metric,misc,ipc",
parse_output_fields),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
@ -3808,6 +3829,10 @@ int cmd_script(int argc, const char **argv)
&script.range_num);
if (err < 0)
goto out_delete;
itrace_synth_opts__set_time_range(&itrace_synth_opts,
script.ptime_range,
script.range_num);
}
err = __cmd_script(&script);
@ -3815,8 +3840,10 @@ int cmd_script(int argc, const char **argv)
flush_scripting();
out_delete:
if (script.ptime_range)
if (script.ptime_range) {
itrace_synth_opts__clear_time_range(&itrace_synth_opts);
zfree(&script.ptime_range);
}
perf_evlist__free_stats(session->evlist);
perf_session__delete(session);

@ -776,6 +776,8 @@ static struct option stat_options[] = {
"stop workload and print counts after a timeout period in ms (>= 10ms)"),
OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
"aggregate counts per processor socket", AGGR_SOCKET),
OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode,
"aggregate counts per processor die", AGGR_DIE),
OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
"aggregate counts per physical processor core", AGGR_CORE),
OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
@ -800,6 +802,12 @@ static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
return cpu_map__get_socket(map, cpu, NULL);
}
static int perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
struct cpu_map *map, int cpu)
{
return cpu_map__get_die(map, cpu, NULL);
}
static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
struct cpu_map *map, int cpu)
{
@ -840,6 +848,12 @@ static int perf_stat__get_socket_cached(struct perf_stat_config *config,
return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
}
static int perf_stat__get_die_cached(struct perf_stat_config *config,
struct cpu_map *map, int idx)
{
return perf_stat__get_aggr(config, perf_stat__get_die, map, idx);
}
static int perf_stat__get_core_cached(struct perf_stat_config *config,
struct cpu_map *map, int idx)
{
@ -870,6 +884,13 @@ static int perf_stat_init_aggr_mode(void)
}
stat_config.aggr_get_id = perf_stat__get_socket_cached;
break;
case AGGR_DIE:
if (cpu_map__build_die_map(evsel_list->cpus, &stat_config.aggr_map)) {
perror("cannot build die map");
return -1;
}
stat_config.aggr_get_id = perf_stat__get_die_cached;
break;
case AGGR_CORE:
if (cpu_map__build_core_map(evsel_list->cpus, &stat_config.aggr_map)) {
perror("cannot build core map");
@ -935,21 +956,55 @@ static int perf_env__get_socket(struct cpu_map *map, int idx, void *data)
return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
}
static int perf_env__get_die(struct cpu_map *map, int idx, void *data)
{
struct perf_env *env = data;
int die_id = -1, cpu = perf_env__get_cpu(env, map, idx);
if (cpu != -1) {
/*
* Encode socket in bit range 15:8
* die_id is relative to socket,
* we need a global id. So we combine
* socket + die id
*/
if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n"))
return -1;
if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n"))
return -1;
die_id = (env->cpu[cpu].socket_id << 8) | (env->cpu[cpu].die_id & 0xff);
}
return die_id;
}
static int perf_env__get_core(struct cpu_map *map, int idx, void *data)
{
struct perf_env *env = data;
int core = -1, cpu = perf_env__get_cpu(env, map, idx);
if (cpu != -1) {
int socket_id = env->cpu[cpu].socket_id;
/*
* Encode socket in upper 16 bits
* core_id is relative to socket, and
* Encode socket in bit range 31:24
* encode die id in bit range 23:16
* core_id is relative to socket and die,
* we need a global id. So we combine
* socket + core id.
* socket + die id + core id
*/
core = (socket_id << 16) | (env->cpu[cpu].core_id & 0xffff);
if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n"))
return -1;
if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n"))
return -1;
if (WARN_ONCE(env->cpu[cpu].core_id >> 16, "The core id number is too big.\n"))
return -1;
core = (env->cpu[cpu].socket_id << 24) |
(env->cpu[cpu].die_id << 16) |
(env->cpu[cpu].core_id & 0xffff);
}
return core;
@ -961,6 +1016,12 @@ static int perf_env__build_socket_map(struct perf_env *env, struct cpu_map *cpus
return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
}
static int perf_env__build_die_map(struct perf_env *env, struct cpu_map *cpus,
struct cpu_map **diep)
{
return cpu_map__build_map(cpus, diep, perf_env__get_die, env);
}
static int perf_env__build_core_map(struct perf_env *env, struct cpu_map *cpus,
struct cpu_map **corep)
{
@ -972,6 +1033,11 @@ static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_un
{
return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
}
static int perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
struct cpu_map *map, int idx)
{
return perf_env__get_die(map, idx, &perf_stat.session->header.env);
}
static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
struct cpu_map *map, int idx)
@ -991,6 +1057,13 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
}
stat_config.aggr_get_id = perf_stat__get_socket_file;
break;
case AGGR_DIE:
if (perf_env__build_die_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
perror("cannot build die map");
return -1;
}
stat_config.aggr_get_id = perf_stat__get_die_file;
break;
case AGGR_CORE:
if (perf_env__build_core_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
perror("cannot build core map");
@ -1541,6 +1614,8 @@ static int __cmd_report(int argc, const char **argv)
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
"aggregate counts per processor socket", AGGR_SOCKET),
OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode,
"aggregate counts per processor die", AGGR_DIE),
OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
"aggregate counts per physical processor core", AGGR_CORE),
OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,

@ -971,8 +971,14 @@ struct syscall {
struct syscall_arg_fmt *arg_fmt;
};
/*
* Must match what is in the BPF program:
*
* tools/perf/examples/bpf/augmented_raw_syscalls.c
*/
struct bpf_map_syscall_entry {
bool enabled;
u16 string_args_len[6];
};
/*
@ -1226,8 +1232,17 @@ static void thread__set_filename_pos(struct thread *thread, const char *bf,
static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
struct augmented_arg *augmented_arg = arg->augmented.args;
size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
/*
* So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
* we would have two strings, each prefixed by its size.
*/
int consumed = sizeof(*augmented_arg) + augmented_arg->size;
return scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
arg->augmented.args += consumed;
arg->augmented.size -= consumed;
return printed;
}
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
@ -1415,10 +1430,11 @@ static int syscall__set_arg_fmts(struct syscall *sc)
if (sc->fmt && sc->fmt->arg[idx].scnprintf)
continue;
len = strlen(field->name);
if (strcmp(field->type, "const char *") == 0 &&
(strcmp(field->name, "filename") == 0 ||
strcmp(field->name, "path") == 0 ||
strcmp(field->name, "pathname") == 0))
((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
strstr(field->name, "path") != NULL))
sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
sc->arg_fmt[idx].scnprintf = SCA_PTR;
@ -1429,8 +1445,7 @@ static int syscall__set_arg_fmts(struct syscall *sc)
else if ((strcmp(field->type, "int") == 0 ||
strcmp(field->type, "unsigned int") == 0 ||
strcmp(field->type, "long") == 0) &&
(len = strlen(field->name)) >= 2 &&
strcmp(field->name + len - 2, "fd") == 0) {
len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
/*
* /sys/kernel/tracing/events/syscalls/sys_enter*
* egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
@ -1513,6 +1528,7 @@ static int trace__read_syscall_info(struct trace *trace, int id)
static int trace__validate_ev_qualifier(struct trace *trace)
{
int err = 0, i;
bool printed_invalid_prefix = false;
size_t nr_allocated;
struct str_node *pos;
@ -1539,14 +1555,15 @@ static int trace__validate_ev_qualifier(struct trace *trace)
if (id >= 0)
goto matches;
if (err == 0) {
fputs("Error:\tInvalid syscall ", trace->output);
err = -EINVAL;
if (!printed_invalid_prefix) {
pr_debug("Skipping unknown syscalls: ");
printed_invalid_prefix = true;
} else {
fputs(", ", trace->output);
pr_debug(", ");
}
fputs(sc, trace->output);
pr_debug("%s", sc);
continue;
}
matches:
trace->ev_qualifier_ids.entries[i++] = id;
@ -1575,15 +1592,14 @@ matches:
}
}
if (err < 0) {
fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
"\nHint:\tand: 'man syscalls'\n", trace->output);
out_free:
zfree(&trace->ev_qualifier_ids.entries);
trace->ev_qualifier_ids.nr = 0;
}
out:
if (printed_invalid_prefix)
pr_debug("\n");
return err;
out_free:
zfree(&trace->ev_qualifier_ids.entries);
trace->ev_qualifier_ids.nr = 0;
goto out;
}
/*
@ -2710,6 +2726,25 @@ out_enomem:
}
#ifdef HAVE_LIBBPF_SUPPORT
static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
{
struct syscall *sc = trace__syscall_info(trace, NULL, id);
int arg = 0;
if (sc == NULL)
goto out;
for (; arg < sc->nr_args; ++arg) {
entry->string_args_len[arg] = 0;
if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
/* Should be set like strace -s strsize */
entry->string_args_len[arg] = PATH_MAX;
}
}
out:
for (; arg < 6; ++arg)
entry->string_args_len[arg] = 0;
}
static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
{
int fd = bpf_map__fd(trace->syscalls.map);
@ -2722,6 +2757,9 @@ static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
int key = trace->ev_qualifier_ids.entries[i];
if (value.enabled)
trace__init_bpf_map_syscall_args(trace, key, &value);
err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
if (err)
break;
@ -2739,6 +2777,9 @@ static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
int err = 0, key;
for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
if (enabled)
trace__init_bpf_map_syscall_args(trace, key, &value);
err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
if (err)
break;
@ -3662,7 +3703,12 @@ static int trace__config(const char *var, const char *value, void *arg)
struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
"event selector. use 'perf list' to list available events",
parse_events_option);
err = parse_events_option(&o, value, 0);
/*
* We can't propagate parse_event_option() return, as it is 1
* for failure while perf_config() expects -1.
*/
if (parse_events_option(&o, value, 0))
err = -1;
} else if (!strcmp(var, "trace.show_timestamp")) {
trace->show_tstamp = perf_config_bool(var, value);
} else if (!strcmp(var, "trace.show_duration")) {

@ -21,8 +21,14 @@
/* bpf-output associated map */
bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
/*
* string_args_len: one per syscall arg, 0 means not a string or don't copy it,
* PATH_MAX for copying everything, any other value to limit
* it a la 'strace -s strsize'.
*/
struct syscall {
bool enabled;
u16 string_args_len[6];
};
bpf_map(syscalls, ARRAY, int, struct syscall, 512);
@ -41,85 +47,10 @@ struct syscall_exit_args {
struct augmented_filename {
unsigned int size;
int reserved;
int err;
char value[PATH_MAX];
};
/* syscalls where the first arg is a string */
#define SYS_OPEN 2
#define SYS_STAT 4
#define SYS_LSTAT 6
#define SYS_ACCESS 21
#define SYS_EXECVE 59
#define SYS_TRUNCATE 76
#define SYS_CHDIR 80
#define SYS_RENAME 82
#define SYS_MKDIR 83
#define SYS_RMDIR 84
#define SYS_CREAT 85
#define SYS_LINK 86
#define SYS_UNLINK 87
#define SYS_SYMLINK 88
#define SYS_READLINK 89
#define SYS_CHMOD 90
#define SYS_CHOWN 92
#define SYS_LCHOWN 94
#define SYS_MKNOD 133
#define SYS_STATFS 137
#define SYS_PIVOT_ROOT 155
#define SYS_CHROOT 161
#define SYS_ACCT 163
#define SYS_SWAPON 167
#define SYS_SWAPOFF 168
#define SYS_DELETE_MODULE 176
#define SYS_SETXATTR 188
#define SYS_LSETXATTR 189
#define SYS_GETXATTR 191
#define SYS_LGETXATTR 192
#define SYS_LISTXATTR 194
#define SYS_LLISTXATTR 195
#define SYS_REMOVEXATTR 197
#define SYS_LREMOVEXATTR 198
#define SYS_MQ_OPEN 240
#define SYS_MQ_UNLINK 241
#define SYS_ADD_KEY 248
#define SYS_REQUEST_KEY 249
#define SYS_SYMLINKAT 266
#define SYS_MEMFD_CREATE 319
/* syscalls where the second arg is a string */
#define SYS_PWRITE64 18
#define SYS_EXECVE 59
#define SYS_RENAME 82
#define SYS_QUOTACTL 179
#define SYS_FSETXATTR 190
#define SYS_FGETXATTR 193
#define SYS_FREMOVEXATTR 199
#define SYS_MQ_TIMEDSEND 242
#define SYS_REQUEST_KEY 249
#define SYS_INOTIFY_ADD_WATCH 254
#define SYS_OPENAT 257
#define SYS_MKDIRAT 258
#define SYS_MKNODAT 259
#define SYS_FCHOWNAT 260
#define SYS_FUTIMESAT 261
#define SYS_NEWFSTATAT 262
#define SYS_UNLINKAT 263
#define SYS_RENAMEAT 264
#define SYS_LINKAT 265
#define SYS_READLINKAT 267
#define SYS_FCHMODAT 268
#define SYS_FACCESSAT 269
#define SYS_UTIMENSAT 280
#define SYS_NAME_TO_HANDLE_AT 303
#define SYS_FINIT_MODULE 313
#define SYS_RENAMEAT2 316
#define SYS_EXECVEAT 322
#define SYS_STATX 332
#define SYS_MOVE_MOUNT 429
#define SYS_FSPICK 433
pid_filter(pids_filtered);
struct augmented_args_filename {
@ -129,12 +60,48 @@ struct augmented_args_filename {
bpf_map(augmented_filename_map, PERCPU_ARRAY, int, struct augmented_args_filename, 1);
static inline
unsigned int augmented_filename__read(struct augmented_filename *augmented_filename,
const void *filename_arg, unsigned int filename_len)
{
unsigned int len = sizeof(*augmented_filename);
int size = probe_read_str(&augmented_filename->value, filename_len, filename_arg);
augmented_filename->size = augmented_filename->err = 0;
/*
* probe_read_str may return < 0, e.g. -EFAULT
* So we leave that in the augmented_filename->size that userspace will
*/
if (size > 0) {
len -= sizeof(augmented_filename->value) - size;
len &= sizeof(augmented_filename->value) - 1;
augmented_filename->size = size;
} else {
/*
* So that userspace notices the error while still being able
* to skip this augmented arg record
*/
augmented_filename->err = size;
len = offsetof(struct augmented_filename, value);
}
return len;
}
SEC("raw_syscalls:sys_enter")
int sys_enter(struct syscall_enter_args *args)
{
struct augmented_args_filename *augmented_args;
unsigned int len = sizeof(*augmented_args);
const void *filename_arg = NULL;
/*
* We start len, the amount of data that will be in the perf ring
* buffer, if this is not filtered out by one of pid_filter__has(),
* syscall->enabled, etc, with the non-augmented raw syscall payload,
* i.e. sizeof(augmented_args->args).
*
* We'll add to this as we add augmented syscalls right after that
* initial, non-augmented raw_syscalls:sys_enter payload.
*/
unsigned int len = sizeof(augmented_args->args);
struct syscall *syscall;
int key = 0;
@ -191,112 +158,66 @@ int sys_enter(struct syscall_enter_args *args)
* after the ctx memory access to prevent their down stream merging.
*/
/*
* This table of what args are strings will be provided by userspace,
* in the syscalls map, i.e. we will already have to do the lookup to
* see if this specific syscall is filtered, so we can as well get more
* info about what syscall args are strings or pointers, and how many
* bytes to copy, per arg, etc.
* For now copy just the first string arg, we need to improve the protocol
* and have more than one.
*
* For now hard code it, till we have all the basic mechanisms in place
* to automate everything and make the kernel part be completely driven
* by information obtained in userspace for each kernel version and
* processor architecture, making the kernel part the same no matter what
* kernel version or processor architecture it runs on.
*/
switch (augmented_args->args.syscall_nr) {
case SYS_ACCT:
case SYS_ADD_KEY:
case SYS_CHDIR:
case SYS_CHMOD:
case SYS_CHOWN:
case SYS_CHROOT:
case SYS_CREAT:
case SYS_DELETE_MODULE:
case SYS_EXECVE:
case SYS_GETXATTR:
case SYS_LCHOWN:
case SYS_LGETXATTR:
case SYS_LINK:
case SYS_LISTXATTR:
case SYS_LLISTXATTR:
case SYS_LREMOVEXATTR:
case SYS_LSETXATTR:
case SYS_LSTAT:
case SYS_MEMFD_CREATE:
case SYS_MKDIR:
case SYS_MKNOD:
case SYS_MQ_OPEN:
case SYS_MQ_UNLINK:
case SYS_PIVOT_ROOT:
case SYS_READLINK:
case SYS_REMOVEXATTR:
case SYS_RENAME:
case SYS_REQUEST_KEY:
case SYS_RMDIR:
case SYS_SETXATTR:
case SYS_STAT:
case SYS_STATFS:
case SYS_SWAPOFF:
case SYS_SWAPON:
case SYS_SYMLINK:
case SYS_SYMLINKAT:
case SYS_TRUNCATE:
case SYS_UNLINK:
case SYS_ACCESS:
case SYS_OPEN: filename_arg = (const void *)args->args[0];
* Using the unrolled loop is not working, only when we do it manually,
* check this out later...
u8 arg;
#pragma clang loop unroll(full)
for (arg = 0; arg < 6; ++arg) {
if (syscall->string_args_len[arg] != 0) {
filename_len = syscall->string_args_len[arg];
filename_arg = (const void *)args->args[arg];
__asm__ __volatile__("": : :"memory");
break;
case SYS_EXECVEAT:
case SYS_FACCESSAT:
case SYS_FCHMODAT:
case SYS_FCHOWNAT:
case SYS_FGETXATTR:
case SYS_FINIT_MODULE:
case SYS_FREMOVEXATTR:
case SYS_FSETXATTR:
case SYS_FSPICK:
case SYS_FUTIMESAT:
case SYS_INOTIFY_ADD_WATCH:
case SYS_LINKAT:
case SYS_MKDIRAT:
case SYS_MKNODAT:
// case SYS_MOVE_MOUNT:
// For now don't copy move_mount first string arg, as it has two and
// 'perf trace's syscall_arg__scnprintf_filename() will use the one
// copied here, the first, for both args, duplicating the first and
// ignoring the second.
//
// We need to copy both here and make syscall_arg__scnprintf_filename
// skip the first when reading the second, using the size of the first, etc.
// Shouldn't be difficult, but now its perf/urgent time, lets wait for
// the next devel window.
case SYS_MQ_TIMEDSEND:
case SYS_NAME_TO_HANDLE_AT:
case SYS_NEWFSTATAT:
case SYS_PWRITE64:
case SYS_QUOTACTL:
case SYS_READLINKAT:
case SYS_RENAMEAT:
case SYS_RENAMEAT2:
case SYS_STATX:
case SYS_UNLINKAT:
case SYS_UTIMENSAT:
case SYS_OPENAT: filename_arg = (const void *)args->args[1];
break;
break;
}
}
if (filename_arg != NULL) {
augmented_args->filename.reserved = 0;
augmented_args->filename.size = probe_read_str(&augmented_args->filename.value,
sizeof(augmented_args->filename.value),
filename_arg);
if (augmented_args->filename.size < sizeof(augmented_args->filename.value)) {
len -= sizeof(augmented_args->filename.value) - augmented_args->filename.size;
len &= sizeof(augmented_args->filename.value) - 1;
}
} else {
len = sizeof(augmented_args->args);
}
verifier log:
; if (syscall->string_args_len[arg] != 0) {
37: (69) r3 = *(u16 *)(r0 +2)
R0=map_value(id=0,off=0,ks=4,vs=14,imm=0) R1_w=inv0 R2_w=map_value(id=0,off=2,ks=4,vs=14,imm=0) R6=ctx(id=0,off=0,imm=0) R7=map_value(id=0,off=0,ks=4,vs=4168,imm=0) R10=fp0,call_-1 fp-8=mmmmmmmm
; if (syscall->string_args_len[arg] != 0) {
38: (55) if r3 != 0x0 goto pc+5
R0=map_value(id=0,off=0,ks=4,vs=14,imm=0) R1=inv0 R2=map_value(id=0,off=2,ks=4,vs=14,imm=0) R3=inv0 R6=ctx(id=0,off=0,imm=0) R7=map_value(id=0,off=0,ks=4,vs=4168,imm=0) R10=fp0,call_-1 fp-8=mmmmmmmm
39: (b7) r1 = 1
; if (syscall->string_args_len[arg] != 0) {
40: (bf) r2 = r0
41: (07) r2 += 4
42: (69) r3 = *(u16 *)(r0 +4)
R0=map_value(id=0,off=0,ks=4,vs=14,imm=0) R1_w=inv1 R2_w=map_value(id=0,off=4,ks=4,vs=14,imm=0) R3_w=inv0 R6=ctx(id=0,off=0,imm=0) R7=map_value(id=0,off=0,ks=4,vs=4168,imm=0) R10=fp0,call_-1 fp-8=mmmmmmmm
; if (syscall->string_args_len[arg] != 0) {
43: (15) if r3 == 0x0 goto pc+32
R0=map_value(id=0,off=0,ks=4,vs=14,imm=0) R1=inv1 R2=map_value(id=0,off=4,ks=4,vs=14,imm=0) R3=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff)) R6=ctx(id=0,off=0,imm=0) R7=map_value(id=0,off=0,ks=4,vs=4168,imm=0) R10=fp0,call_-1 fp-8=mmmmmmmm
; filename_arg = (const void *)args->args[arg];
44: (67) r1 <<= 3
45: (bf) r3 = r6
46: (0f) r3 += r1
47: (b7) r5 = 64
48: (79) r3 = *(u64 *)(r3 +16)
dereference of modified ctx ptr R3 off=8 disallowed
processed 46 insns (limit 1000000) max_states_per_insn 0 total_states 12 peak_states 12 mark_read 7
*/
#define __loop_iter(arg) \
if (syscall->string_args_len[arg] != 0) { \
unsigned int filename_len = syscall->string_args_len[arg]; \
const void *filename_arg = (const void *)args->args[arg]; \
if (filename_len <= sizeof(augmented_args->filename.value)) \
len += augmented_filename__read(&augmented_args->filename, filename_arg, filename_len);
#define loop_iter_first() __loop_iter(0); }
#define loop_iter(arg) else __loop_iter(arg); }
#define loop_iter_last(arg) else __loop_iter(arg); __asm__ __volatile__("": : :"memory"); }
loop_iter_first()
loop_iter(1)
loop_iter(2)
loop_iter(3)
loop_iter(4)
loop_iter_last(5)
/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, augmented_args, len);
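
The loop_iter*() macros do by hand what "#pragma clang loop unroll(full)" was supposed to do: the verifier log quoted in the comment shows the compiler-unrolled loop computing the args->args[] offset from a register, which the BPF verifier rejects ("dereference of modified ctx ptr"), while the manual expansion keeps every ctx offset constant. The syscall->string_args_len[] table the macros walk is meant to be filled from userspace. A rough sketch of that userspace side follows, assuming the map value layout implied by the lookups above; the map fd, syscall number and the PATH_MAX choice are illustrative, not the exact 'perf trace' code:

    #include <bpf/bpf.h>          /* bpf_map_update_elem(), from libbpf */
    #include <linux/limits.h>     /* PATH_MAX */
    #include <linux/types.h>
    #include <stdbool.h>

    struct syscall {
            bool  enabled;                /* should this syscall be traced? */
            __u16 string_args_len[6];     /* 0: not a string; else bytes to copy */
    };

    /* Mark openat()'s second arg (args->args[1]) as a pathname to copy. */
    static int mark_openat(int syscalls_map_fd, int openat_nr)
    {
            struct syscall value = {
                    .enabled = true,
                    .string_args_len = { [1] = PATH_MAX, },
            };

            return bpf_map_update_elem(syscalls_map_fd, &openat_nr, &value, BPF_ANY);
    }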


@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/string.h>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
@ -162,8 +163,7 @@ copy_class_filename(const char * class_sign, const char * file_name, char * resu
result[i] = '\0';
} else {
/* fallback case */
size_t file_name_len = strlen(file_name);
strncpy(result, file_name, file_name_len < max_length ? file_name_len : max_length);
strlcpy(result, file_name, max_length);
}
}
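
The warning being silenced comes from the strncpy() bound: it was derived from strlen(file_name), so on the truncating path no NUL terminator was ever written and gcc flags the call. strlcpy() copies at most max_length - 1 bytes and always terminates. A standalone sketch of the semantics; my_strlcpy() is a local stand-in, since glibc historically did not provide the function (the kernel tree carries its own):

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcpy() work-alike, for illustration only. */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
            size_t len = strlen(src);

            if (size) {
                    size_t n = len >= size ? size - 1 : len;

                    memcpy(dst, src, n);
                    dst[n] = '\0';  /* terminated even when truncating */
            }
            return len;             /* lets callers detect truncation */
    }

    int main(void)
    {
            char buf[8];

            my_strlcpy(buf, "Hello.java", sizeof(buf));
            printf("%s\n", buf);    /* "Hello.j": truncated but terminated */
            return 0;
    }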


@ -61,6 +61,8 @@ struct record_opts {
bool record_switch_events;
bool all_kernel;
bool all_user;
bool kernel_callchains;
bool user_callchains;
bool tail_synthesize;
bool overwrite;
bool ignore_missing_thread;


@ -394,7 +394,9 @@ if branches:
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean,'
'call_path_id bigint)')
'call_path_id bigint,'
'insn_count bigint,'
'cyc_count bigint)')
else:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
@ -418,7 +420,9 @@ else:
'data_src bigint,'
'branch_type integer,'
'in_tx boolean,'
'call_path_id bigint)')
'call_path_id bigint,'
'insn_count bigint,'
'cyc_count bigint)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE TABLE call_paths ('
@ -439,7 +443,9 @@ if perf_db_export_calls:
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer,'
'parent_id bigint)')
'parent_id bigint,'
'insn_count bigint,'
'cyc_count bigint)')
do_query(query, 'CREATE VIEW machines_view AS '
'SELECT '
@ -521,6 +527,9 @@ if perf_db_export_calls:
'return_time,'
'return_time - call_time AS elapsed_time,'
'branch_count,'
'insn_count,'
'cyc_count,'
'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC,'
'call_id,'
'return_id,'
'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,'
@ -546,7 +555,10 @@ do_query(query, 'CREATE VIEW samples_view AS '
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx'
'in_tx,'
'insn_count,'
'cyc_count,'
'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC'
' FROM samples')
@ -618,10 +630,10 @@ def trace_begin():
comm_table(0, "unknown")
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
@ -772,11 +784,11 @@ def branch_type_table(branch_type, name, *x):
value = struct.pack(fmt, 2, 4, branch_type, n, name)
branch_type_file.write(value)
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x):
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, insn_cnt, cyc_cnt, *x):
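# Each record is a PostgreSQL COPY BINARY tuple: a 16-bit column count ("h"),
# then, per column, a 32-bit byte length followed by the raw value. That is why
# adding insn_cnt/cyc_cnt appends two "iq" pairs and bumps the counts 18 -> 20
# and 22 -> 24 below.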
if branches:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiq", 18, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id)
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiqiqiq", 20, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt)
else:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiq", 22, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id)
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiqiqiq", 24, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt)
sample_file.write(value)
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
@ -784,7 +796,7 @@ def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, *x):
fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiq"
value = struct.pack(fmt, 12, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, insn_cnt, cyc_cnt, *x):
fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiqiqiq"
value = struct.pack(fmt, 14, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id, 8, insn_cnt, 8, cyc_cnt)
call_file.write(value)


@ -218,7 +218,9 @@ if branches:
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean,'
'call_path_id bigint)')
'call_path_id bigint,'
'insn_count bigint,'
'cyc_count bigint)')
else:
do_query(query, 'CREATE TABLE samples ('
'id integer NOT NULL PRIMARY KEY,'
@ -242,7 +244,9 @@ else:
'data_src bigint,'
'branch_type integer,'
'in_tx boolean,'
'call_path_id bigint)')
'call_path_id bigint,'
'insn_count bigint,'
'cyc_count bigint)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE TABLE call_paths ('
@ -263,7 +267,9 @@ if perf_db_export_calls:
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer,'
'parent_id bigint)')
'parent_id bigint,'
'insn_count bigint,'
'cyc_count bigint)')
# printf was added to sqlite in version 3.8.3
sqlite_has_printf = False
@ -359,6 +365,9 @@ if perf_db_export_calls:
'return_time,'
'return_time - call_time AS elapsed_time,'
'branch_count,'
'insn_count,'
'cyc_count,'
'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC,'
'call_id,'
'return_id,'
'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
@ -384,7 +393,10 @@ do_query(query, 'CREATE VIEW samples_view AS '
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx'
'in_tx,'
'insn_count,'
'cyc_count,'
'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC'
' FROM samples')
do_query(query, 'END TRANSACTION')
@ -407,15 +419,15 @@ branch_type_query = QSqlQuery(db)
branch_type_query.prepare("INSERT INTO branch_types VALUES (?, ?)")
sample_query = QSqlQuery(db)
if branches:
sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
else:
sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
if perf_db_export_calls or perf_db_export_callchains:
call_path_query = QSqlQuery(db)
call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
if perf_db_export_calls:
call_query = QSqlQuery(db)
call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
def trace_begin():
printdate("Writing records...")
@ -427,10 +439,10 @@ def trace_begin():
comm_table(0, "unknown")
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
@ -486,14 +498,14 @@ def sample_table(*x):
if branches:
for xx in x[0:15]:
sample_query.addBindValue(str(xx))
for xx in x[19:22]:
for xx in x[19:24]:
sample_query.addBindValue(str(xx))
do_query_(sample_query)
else:
bind_exec(sample_query, 22, x)
bind_exec(sample_query, 24, x)
def call_path_table(*x):
bind_exec(call_path_query, 4, x)
def call_return_table(*x):
bind_exec(call_query, 12, x)
bind_exec(call_query, 14, x)


@ -200,9 +200,10 @@ class Thread(QThread):
class TreeModel(QAbstractItemModel):
def __init__(self, glb, parent=None):
def __init__(self, glb, params, parent=None):
super(TreeModel, self).__init__(parent)
self.glb = glb
self.params = params
self.root = self.GetRoot()
self.last_row_read = 0
@ -399,6 +400,7 @@ class FindBar():
def Activate(self):
self.bar.show()
self.textbox.lineEdit().selectAll()
self.textbox.setFocus()
def Deactivate(self):
@ -463,8 +465,9 @@ class FindBar():
class CallGraphLevelItemBase(object):
def __init__(self, glb, row, parent_item):
def __init__(self, glb, params, row, parent_item):
self.glb = glb
self.params = params
self.row = row
self.parent_item = parent_item
self.query_done = False;
@ -503,18 +506,24 @@ class CallGraphLevelItemBase(object):
class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item):
super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item):
super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item)
self.comm_id = comm_id
self.thread_id = thread_id
self.call_path_id = call_path_id
self.insn_cnt = insn_cnt
self.cyc_cnt = cyc_cnt
self.branch_count = branch_count
self.time = time
def Select(self):
self.query_done = True;
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time), SUM(branch_count)"
if self.params.have_ipc:
ipc_str = ", SUM(insn_count), SUM(cyc_count)"
else:
ipc_str = ""
QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time)" + ipc_str + ", SUM(branch_count)"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
@ -525,7 +534,15 @@ class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
" GROUP BY call_path_id, name, short_name"
" ORDER BY call_path_id")
while query.next():
child_item = CallGraphLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
if self.params.have_ipc:
insn_cnt = int(query.value(5))
cyc_cnt = int(query.value(6))
branch_count = int(query.value(7))
else:
insn_cnt = 0
cyc_cnt = 0
branch_count = int(query.value(5))
child_item = CallGraphLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self)
self.child_items.append(child_item)
self.child_count += 1
@ -533,37 +550,57 @@ class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, call_path_id, name, dso, count, time, branch_count, parent_item):
super(CallGraphLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item)
def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item):
super(CallGraphLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item)
dso = dsoname(dso)
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
if self.params.have_ipc:
insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt)
cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt)
br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count)
ipc = CalcIPC(cyc_cnt, insn_cnt)
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
else:
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
self.dbid = call_path_id
# Context-sensitive call graph data model level two item
class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
super(CallGraphLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 1, 0, 0, parent_item)
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item):
super(CallGraphLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 1, 0, 0, 0, 0, parent_item)
if self.params.have_ipc:
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""]
else:
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
self.dbid = thread_id
def Select(self):
super(CallGraphLevelTwoItem, self).Select()
for child_item in self.child_items:
self.time += child_item.time
self.insn_cnt += child_item.insn_cnt
self.cyc_cnt += child_item.cyc_cnt
self.branch_count += child_item.branch_count
for child_item in self.child_items:
child_item.data[4] = PercentToOneDP(child_item.time, self.time)
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
if self.params.have_ipc:
child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt)
child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt)
child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count)
else:
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Context-sensitive call graph data model level one item
class CallGraphLevelOneItem(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, comm, parent_item):
super(CallGraphLevelOneItem, self).__init__(glb, row, parent_item)
self.data = [comm, "", "", "", "", "", ""]
def __init__(self, glb, params, row, comm_id, comm, parent_item):
super(CallGraphLevelOneItem, self).__init__(glb, params, row, parent_item)
if self.params.have_ipc:
self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""]
else:
self.data = [comm, "", "", "", "", "", ""]
self.dbid = comm_id
def Select(self):
@ -574,7 +611,7 @@ class CallGraphLevelOneItem(CallGraphLevelItemBase):
" INNER JOIN threads ON thread_id = threads.id"
" WHERE comm_id = " + str(self.dbid))
while query.next():
child_item = CallGraphLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
child_item = CallGraphLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
self.child_items.append(child_item)
self.child_count += 1
@ -582,8 +619,8 @@ class CallGraphLevelOneItem(CallGraphLevelItemBase):
class CallGraphRootItem(CallGraphLevelItemBase):
def __init__(self, glb):
super(CallGraphRootItem, self).__init__(glb, 0, None)
def __init__(self, glb, params):
super(CallGraphRootItem, self).__init__(glb, params, 0, None)
self.dbid = 0
self.query_done = True;
query = QSqlQuery(glb.db)
@ -591,16 +628,23 @@ class CallGraphRootItem(CallGraphLevelItemBase):
while query.next():
if not query.value(0):
continue
child_item = CallGraphLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
child_item = CallGraphLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self)
self.child_items.append(child_item)
self.child_count += 1
# Call graph model parameters
class CallGraphModelParams():
def __init__(self, glb, parent=None):
self.have_ipc = IsSelectable(glb.db, "calls", columns = "insn_count, cyc_count")
# Context-sensitive call graph data model base
class CallGraphModelBase(TreeModel):
def __init__(self, glb, parent=None):
super(CallGraphModelBase, self).__init__(glb, parent)
super(CallGraphModelBase, self).__init__(glb, CallGraphModelParams(glb), parent)
def FindSelect(self, value, pattern, query):
if pattern:
@ -682,17 +726,26 @@ class CallGraphModel(CallGraphModelBase):
super(CallGraphModel, self).__init__(glb, parent)
def GetRoot(self):
return CallGraphRootItem(self.glb)
return CallGraphRootItem(self.glb, self.params)
def columnCount(self, parent=None):
return 7
if self.params.have_ipc:
return 12
else:
return 7
def columnHeader(self, column):
headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
if self.params.have_ipc:
headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "]
else:
headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
return headers[column]
def columnAlignment(self, column):
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
if self.params.have_ipc:
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
else:
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
return alignment[column]
def DoFindSelect(self, query, match):
@ -729,11 +782,13 @@ class CallGraphModel(CallGraphModelBase):
class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item):
super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
def __init__(self, glb, params, row, comm_id, thread_id, calls_id, time, insn_cnt, cyc_cnt, branch_count, parent_item):
super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item)
self.comm_id = comm_id
self.thread_id = thread_id
self.calls_id = calls_id
self.insn_cnt = insn_cnt
self.cyc_cnt = cyc_cnt
self.branch_count = branch_count
self.time = time
@ -743,8 +798,12 @@ class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
else:
comm_thread = ""
if self.params.have_ipc:
ipc_str = ", insn_count, cyc_count"
else:
ipc_str = ""
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time, branch_count"
QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time" + ipc_str + ", branch_count"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
@ -752,7 +811,15 @@ class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
" WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
" ORDER BY call_time, calls.id")
while query.next():
child_item = CallTreeLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
if self.params.have_ipc:
insn_cnt = int(query.value(5))
cyc_cnt = int(query.value(6))
branch_count = int(query.value(7))
else:
insn_cnt = 0
cyc_cnt = 0
branch_count = int(query.value(5))
child_item = CallTreeLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self)
self.child_items.append(child_item)
self.child_count += 1
@ -760,37 +827,57 @@ class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, calls_id, name, dso, count, time, branch_count, parent_item):
super(CallTreeLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item)
def __init__(self, glb, params, row, comm_id, thread_id, calls_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item):
super(CallTreeLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, calls_id, time, insn_cnt, cyc_cnt, branch_count, parent_item)
dso = dsoname(dso)
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
if self.params.have_ipc:
insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt)
cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt)
br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count)
ipc = CalcIPC(cyc_cnt, insn_cnt)
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ]
else:
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
self.dbid = calls_id
# Call tree data model level two item
class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
super(CallTreeLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 0, 0, 0, parent_item)
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item):
super(CallTreeLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 0, 0, 0, 0, 0, parent_item)
if self.params.have_ipc:
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""]
else:
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
self.dbid = thread_id
def Select(self):
super(CallTreeLevelTwoItem, self).Select()
for child_item in self.child_items:
self.time += child_item.time
self.insn_cnt += child_item.insn_cnt
self.cyc_cnt += child_item.cyc_cnt
self.branch_count += child_item.branch_count
for child_item in self.child_items:
child_item.data[4] = PercentToOneDP(child_item.time, self.time)
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
if self.params.have_ipc:
child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt)
child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt)
child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count)
else:
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Call tree data model level one item
class CallTreeLevelOneItem(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, comm, parent_item):
super(CallTreeLevelOneItem, self).__init__(glb, row, parent_item)
self.data = [comm, "", "", "", "", "", ""]
def __init__(self, glb, params, row, comm_id, comm, parent_item):
super(CallTreeLevelOneItem, self).__init__(glb, params, row, parent_item)
if self.params.have_ipc:
self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""]
else:
self.data = [comm, "", "", "", "", "", ""]
self.dbid = comm_id
def Select(self):
@ -801,7 +888,7 @@ class CallTreeLevelOneItem(CallGraphLevelItemBase):
" INNER JOIN threads ON thread_id = threads.id"
" WHERE comm_id = " + str(self.dbid))
while query.next():
child_item = CallTreeLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
child_item = CallTreeLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
self.child_items.append(child_item)
self.child_count += 1
@ -809,8 +896,8 @@ class CallTreeLevelOneItem(CallGraphLevelItemBase):
class CallTreeRootItem(CallGraphLevelItemBase):
def __init__(self, glb):
super(CallTreeRootItem, self).__init__(glb, 0, None)
def __init__(self, glb, params):
super(CallTreeRootItem, self).__init__(glb, params, 0, None)
self.dbid = 0
self.query_done = True;
query = QSqlQuery(glb.db)
@ -818,7 +905,7 @@ class CallTreeRootItem(CallGraphLevelItemBase):
while query.next():
if not query.value(0):
continue
child_item = CallTreeLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
child_item = CallTreeLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self)
self.child_items.append(child_item)
self.child_count += 1
@ -830,17 +917,26 @@ class CallTreeModel(CallGraphModelBase):
super(CallTreeModel, self).__init__(glb, parent)
def GetRoot(self):
return CallTreeRootItem(self.glb)
return CallTreeRootItem(self.glb, self.params)
def columnCount(self, parent=None):
return 7
if self.params.have_ipc:
return 12
else:
return 7
def columnHeader(self, column):
headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
if self.params.have_ipc:
headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "]
else:
headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
return headers[column]
def columnAlignment(self, column):
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
if self.params.have_ipc:
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
else:
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
return alignment[column]
def DoFindSelect(self, query, match):
@ -1369,11 +1465,11 @@ class FetchMoreRecordsBar():
class BranchLevelTwoItem():
def __init__(self, row, text, parent_item):
def __init__(self, row, col, text, parent_item):
self.row = row
self.parent_item = parent_item
self.data = [""] * 8
self.data[7] = text
self.data = [""] * (col + 1)
self.data[col] = text
self.level = 2
def getParentItem(self):
@ -1405,6 +1501,7 @@ class BranchLevelOneItem():
self.dbid = data[0]
self.level = 1
self.query_done = False
self.br_col = len(self.data) - 1
def getChildItem(self, row):
return self.child_items[row]
@ -1485,7 +1582,7 @@ class BranchLevelOneItem():
while k < 15:
byte_str += " "
k += 1
self.child_items.append(BranchLevelTwoItem(0, byte_str + " " + text, self))
self.child_items.append(BranchLevelTwoItem(0, self.br_col, byte_str + " " + text, self))
self.child_count += 1
else:
return
@ -1536,16 +1633,37 @@ class BranchRootItem():
def getData(self, column):
return ""
# Calculate instructions per cycle
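# e.g. CalcIPC(6, 18) returns "3.00"; a zero cyc_cnt or insn_cnt yields "0"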
def CalcIPC(cyc_cnt, insn_cnt):
if cyc_cnt and insn_cnt:
ipc = Decimal(float(insn_cnt) / cyc_cnt)
ipc = str(ipc.quantize(Decimal(".01"), rounding=ROUND_HALF_UP))
else:
ipc = "0"
return ipc
# Branch data preparation
def BranchDataPrepBr(query, data):
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
" (" + dsoname(query.value(11)) + ")" + " -> " +
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
" (" + dsoname(query.value(15)) + ")")
def BranchDataPrepIPC(query, data):
insn_cnt = query.value(16)
cyc_cnt = query.value(17)
ipc = CalcIPC(cyc_cnt, insn_cnt)
data.append(insn_cnt)
data.append(cyc_cnt)
data.append(ipc)
def BranchDataPrep(query):
data = []
for i in xrange(0, 8):
data.append(query.value(i))
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
" (" + dsoname(query.value(11)) + ")" + " -> " +
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
" (" + dsoname(query.value(15)) + ")")
BranchDataPrepBr(query, data)
return data
def BranchDataPrepWA(query):
@ -1555,10 +1673,26 @@ def BranchDataPrepWA(query):
data.append("{:>19}".format(query.value(1)))
for i in xrange(2, 8):
data.append(query.value(i))
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
" (" + dsoname(query.value(11)) + ")" + " -> " +
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
" (" + dsoname(query.value(15)) + ")")
BranchDataPrepBr(query, data)
return data
def BranchDataWithIPCPrep(query):
data = []
for i in xrange(0, 8):
data.append(query.value(i))
BranchDataPrepIPC(query, data)
BranchDataPrepBr(query, data)
return data
def BranchDataWithIPCPrepWA(query):
data = []
data.append(query.value(0))
# Work around pyside failing to handle large integers (i.e. time) in python3 by converting to a string
data.append("{:>19}".format(query.value(1)))
for i in xrange(2, 8):
data.append(query.value(i))
BranchDataPrepIPC(query, data)
BranchDataPrepBr(query, data)
return data
# Branch data model
@ -1568,14 +1702,24 @@ class BranchModel(TreeModel):
progress = Signal(object)
def __init__(self, glb, event_id, where_clause, parent=None):
super(BranchModel, self).__init__(glb, parent)
super(BranchModel, self).__init__(glb, None, parent)
self.event_id = event_id
self.more = True
self.populated = 0
self.have_ipc = IsSelectable(glb.db, "samples", columns = "insn_count, cyc_count")
if self.have_ipc:
select_ipc = ", insn_count, cyc_count"
prep_fn = BranchDataWithIPCPrep
prep_wa_fn = BranchDataWithIPCPrepWA
else:
select_ipc = ""
prep_fn = BranchDataPrep
prep_wa_fn = BranchDataPrepWA
sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name,"
" CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END,"
" ip, symbols.name, sym_offset, dsos.short_name,"
" to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name"
+ select_ipc +
" FROM samples"
" INNER JOIN comms ON comm_id = comms.id"
" INNER JOIN threads ON thread_id = threads.id"
@ -1589,9 +1733,9 @@ class BranchModel(TreeModel):
" ORDER BY samples.id"
" LIMIT " + str(glb_chunk_sz))
if pyside_version_1 and sys.version_info[0] == 3:
prep = BranchDataPrepWA
prep = prep_wa_fn
else:
prep = BranchDataPrep
prep = prep_fn
self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
@ -1600,13 +1744,23 @@ class BranchModel(TreeModel):
return BranchRootItem()
def columnCount(self, parent=None):
return 8
if self.have_ipc:
return 11
else:
return 8
def columnHeader(self, column):
return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column]
if self.have_ipc:
return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Insn Cnt", "Cyc Cnt", "IPC", "Branch")[column]
else:
return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column]
def columnFont(self, column):
if column != 7:
if self.have_ipc:
br_col = 10
else:
br_col = 7
if column != br_col:
return None
return QFont("Monospace")
@ -2114,10 +2268,10 @@ def GetEventList(db):
# Is a table selectable
def IsSelectable(db, table, sql = ""):
def IsSelectable(db, table, sql = "", columns = "*"):
query = QSqlQuery(db)
try:
QueryExec(query, "SELECT * FROM " + table + " " + sql + " LIMIT 1")
QueryExec(query, "SELECT " + columns + " FROM " + table + " " + sql + " LIMIT 1")
except:
return False
return True
@ -2854,6 +3008,12 @@ cd xed
sudo ./mfile.py --prefix=/usr/local install
sudo ldconfig
</pre>
<h3>Instructions per Cycle (IPC)</h3>
If available, IPC information is displayed in columns 'insn_cnt', 'cyc_cnt' and 'IPC'.
<p><b>Intel PT note:</b> The information applies to the blocks of code ending with, and including, that branch.
Due to the granularity of timing information, the number of cycles for some code blocks will not be known.
In that case, 'insn_cnt', 'cyc_cnt' and 'IPC' are zero, but when 'IPC' is displayed it covers the period
since the previously displayed 'IPC'.
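For example, a block that retired 18 instructions in 6 cycles is shown with 'insn_cnt' 18, 'cyc_cnt' 6 and an IPC of 3.00.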
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.


@ -51,6 +51,7 @@ perf-y += clang.o
perf-y += unit_number__scnprintf.o
perf-y += mem2node.o
perf-y += map_groups.o
perf-y += time-utils-test.o
$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
$(call rule_mkdir)


@ -289,6 +289,10 @@ static struct test generic_tests[] = {
.desc = "mem2node",
.func = test__mem2node,
},
{
.desc = "time utils",
.func = test__time_utils,
},
{
.desc = "map_groups__merge_in",
.func = test__map_groups__merge_in,


@ -18,6 +18,32 @@
#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
#if defined(__s390x__)
/* Return true if kvm module is available and loaded. Test this
* and return success when the tracepoint kvm_s390_create_vm
* exists. Otherwise this test always fails.
*/
static bool kvm_s390_create_vm_valid(void)
{
char *eventfile;
bool rc = false;
eventfile = get_events_file("kvm-s390");
if (eventfile) {
DIR *mydir = opendir(eventfile);
if (mydir) {
rc = true;
closedir(mydir);
}
put_events_file(eventfile);
}
return rc;
}
#endif
static int test__checkevent_tracepoint(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = perf_evlist__first(evlist);
@ -1642,6 +1668,7 @@ static struct evlist_test test__events[] = {
{
.name = "kvm-s390:kvm_s390_create_vm",
.check = test__checkevent_tracepoint,
.valid = kvm_s390_create_vm_valid,
.id = 100,
},
#endif


@ -108,6 +108,7 @@ int test__clang_subtest_get_nr(void);
int test__unit_number__scnprint(struct test *test, int subtest);
int test__mem2node(struct test *t, int subtest);
int test__map_groups__merge_in(struct test *t, int subtest);
int test__time_utils(struct test *t, int subtest);
bool test__bp_signal_is_supported(void);
bool test__wp_is_supported(void);


@ -0,0 +1,251 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/time64.h>
#include <inttypes.h>
#include <string.h>
#include "time-utils.h"
#include "evlist.h"
#include "session.h"
#include "debug.h"
#include "tests.h"
static bool test__parse_nsec_time(const char *str, u64 expected)
{
u64 ptime;
int err;
pr_debug("\nparse_nsec_time(\"%s\")\n", str);
err = parse_nsec_time(str, &ptime);
if (err) {
pr_debug("error %d\n", err);
return false;
}
if (ptime != expected) {
pr_debug("Failed. ptime %" PRIu64 " expected %" PRIu64 "\n",
ptime, expected);
return false;
}
pr_debug("%" PRIu64 "\n", ptime);
return true;
}
static bool test__perf_time__parse_str(const char *ostr, u64 start, u64 end)
{
struct perf_time_interval ptime;
int err;
pr_debug("\nperf_time__parse_str(\"%s\")\n", ostr);
err = perf_time__parse_str(&ptime, ostr);
if (err) {
pr_debug("Error %d\n", err);
return false;
}
if (ptime.start != start || ptime.end != end) {
pr_debug("Failed. Expected %" PRIu64 " to %" PRIu64 "\n",
start, end);
return false;
}
return true;
}
#define TEST_MAX 64
struct test_data {
const char *str;
u64 first;
u64 last;
struct perf_time_interval ptime[TEST_MAX];
int num;
u64 skip[TEST_MAX];
u64 noskip[TEST_MAX];
};
static bool test__perf_time__parse_for_ranges(struct test_data *d)
{
struct perf_evlist evlist = {
.first_sample_time = d->first,
.last_sample_time = d->last,
};
struct perf_session session = { .evlist = &evlist };
struct perf_time_interval *ptime = NULL;
int range_size, range_num;
bool pass = false;
int i, err;
pr_debug("\nperf_time__parse_for_ranges(\"%s\")\n", d->str);
if (strchr(d->str, '%'))
pr_debug("first_sample_time %" PRIu64 " last_sample_time %" PRIu64 "\n",
d->first, d->last);
err = perf_time__parse_for_ranges(d->str, &session, &ptime, &range_size,
&range_num);
if (err) {
pr_debug("error %d\n", err);
goto out;
}
if (range_size < d->num || range_num != d->num) {
pr_debug("bad size: range_size %d range_num %d expected num %d\n",
range_size, range_num, d->num);
goto out;
}
for (i = 0; i < d->num; i++) {
if (ptime[i].start != d->ptime[i].start ||
ptime[i].end != d->ptime[i].end) {
pr_debug("bad range %d expected %" PRIu64 " to %" PRIu64 "\n",
i, d->ptime[i].start, d->ptime[i].end);
goto out;
}
}
if (perf_time__ranges_skip_sample(ptime, d->num, 0)) {
pr_debug("failed to keep 0\n");
goto out;
}
for (i = 0; i < TEST_MAX; i++) {
if (d->skip[i] &&
!perf_time__ranges_skip_sample(ptime, d->num, d->skip[i])) {
pr_debug("failed to skip %" PRIu64 "\n", d->skip[i]);
goto out;
}
if (d->noskip[i] &&
perf_time__ranges_skip_sample(ptime, d->num, d->noskip[i])) {
pr_debug("failed to keep %" PRIu64 "\n", d->noskip[i]);
goto out;
}
}
pass = true;
out:
free(ptime);
return pass;
}
int test__time_utils(struct test *t __maybe_unused, int subtest __maybe_unused)
{
bool pass = true;
pass &= test__parse_nsec_time("0", 0);
pass &= test__parse_nsec_time("1", 1000000000ULL);
pass &= test__parse_nsec_time("0.000000001", 1);
pass &= test__parse_nsec_time("1.000000001", 1000000001ULL);
pass &= test__parse_nsec_time("123456.123456", 123456123456000ULL);
pass &= test__parse_nsec_time("1234567.123456789", 1234567123456789ULL);
pass &= test__parse_nsec_time("18446744073.709551615",
0xFFFFFFFFFFFFFFFFULL);
pass &= test__perf_time__parse_str("1234567.123456789,1234567.123456789",
1234567123456789ULL, 1234567123456789ULL);
pass &= test__perf_time__parse_str("1234567.123456789,1234567.123456790",
1234567123456789ULL, 1234567123456790ULL);
pass &= test__perf_time__parse_str("1234567.123456789,",
1234567123456789ULL, 0);
pass &= test__perf_time__parse_str(",1234567.123456789",
0, 1234567123456789ULL);
pass &= test__perf_time__parse_str("0,1234567.123456789",
0, 1234567123456789ULL);
{
u64 b = 1234567123456789ULL;
struct test_data d = {
.str = "1234567.123456789,1234567.123456790",
.ptime = { {b, b + 1}, },
.num = 1,
.skip = { b - 1, b + 2, },
.noskip = { b, b + 1, },
};
pass &= test__perf_time__parse_for_ranges(&d);
}
{
u64 b = 1234567123456789ULL;
u64 c = 7654321987654321ULL;
u64 e = 8000000000000000ULL;
struct test_data d = {
.str = "1234567.123456789,1234567.123456790 "
"7654321.987654321,7654321.987654444 "
"8000000,8000000.000000005",
.ptime = { {b, b + 1}, {c, c + 123}, {e, e + 5}, },
.num = 3,
.skip = { b - 1, b + 2, c - 1, c + 124, e - 1, e + 6 },
.noskip = { b, b + 1, c, c + 123, e, e + 5 },
};
pass &= test__perf_time__parse_for_ranges(&d);
}
{
u64 b = 7654321ULL * NSEC_PER_SEC;
struct test_data d = {
.str = "10%/1",
.first = b,
.last = b + 100,
.ptime = { {b, b + 9}, },
.num = 1,
.skip = { b - 1, b + 10, },
.noskip = { b, b + 9, },
};
pass &= test__perf_time__parse_for_ranges(&d);
}
{
u64 b = 7654321ULL * NSEC_PER_SEC;
struct test_data d = {
.str = "10%/2",
.first = b,
.last = b + 100,
.ptime = { {b + 10, b + 19}, },
.num = 1,
.skip = { b + 9, b + 20, },
.noskip = { b + 10, b + 19, },
};
pass &= test__perf_time__parse_for_ranges(&d);
}
{
u64 b = 11223344ULL * NSEC_PER_SEC;
struct test_data d = {
.str = "10%/1,10%/2",
.first = b,
.last = b + 100,
.ptime = { {b, b + 9}, {b + 10, b + 19}, },
.num = 2,
.skip = { b - 1, b + 20, },
.noskip = { b, b + 8, b + 9, b + 10, b + 11, b + 12, b + 19, },
};
pass &= test__perf_time__parse_for_ranges(&d);
}
{
u64 b = 11223344ULL * NSEC_PER_SEC;
struct test_data d = {
.str = "10%/1,10%/3,10%/10",
.first = b,
.last = b + 100,
.ptime = { {b, b + 9}, {b + 20, b + 29}, { b + 90, b + 100}, },
.num = 3,
.skip = { b - 1, b + 10, b + 19, b + 30, b + 89, b + 101 },
.noskip = { b, b + 9, b + 20, b + 29, b + 90, b + 100},
};
pass &= test__perf_time__parse_for_ranges(&d);
}
pr_debug("\n");
return pass ? 0 : TEST_FAIL;
}
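
The "N%/i" strings above slice the [first_sample_time, last_sample_time] window into percentage-sized chunks, with the final chunk closed at the last sample time. A rough model of the arithmetic these tests expect; it mirrors the test data only, not necessarily the exact rounding done by perf_time__parse_for_ranges():

    #include <stdint.h>
    #include <stdio.h>

    /* Slice idx (1-based) covering 'percent'% of [first, last]. */
    static void percent_slice(uint64_t first, uint64_t last, unsigned int percent,
                              unsigned int idx, uint64_t *start, uint64_t *end)
    {
            uint64_t step = (last - first) * percent / 100;

            *start = first + (uint64_t)(idx - 1) * step;
            /* the last slice runs all the way to 'last', as in "10%/10" above */
            *end = idx * percent >= 100 ? last : *start + step - 1;
    }

    int main(void)
    {
            uint64_t s, e, b = 7654321ULL * 1000000000ULL; /* NSEC_PER_SEC */

            percent_slice(b, b + 100, 10, 2, &s, &e);
            /* prints "10 19", matching the "10%/2" expectation above */
            printf("%llu %llu\n", (unsigned long long)(s - b),
                   (unsigned long long)(e - b));
            return 0;
    }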


@ -931,9 +931,8 @@ static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
if (sym == NULL)
return 0;
src = symbol__hists(sym, evsel->evlist->nr_entries);
if (src == NULL)
return -ENOMEM;
return __symbol__inc_addr_samples(sym, map, src, evsel->idx, addr, sample);
return (src) ? __symbol__inc_addr_samples(sym, map, src, evsel->idx,
addr, sample) : 0;
}
static int symbol__account_cycles(u64 addr, u64 start,


@ -74,6 +74,8 @@ enum itrace_period_type {
* @period_type: 'instructions' events period type
* @initial_skip: skip N events at the beginning.
* @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
* @ptime_range: time intervals to trace or NULL
* @range_num: number of time intervals to trace
*/
struct itrace_synth_opts {
bool set;
@ -98,6 +100,8 @@ struct itrace_synth_opts {
enum itrace_period_type period_type;
unsigned long initial_skip;
unsigned long *cpu_bitmap;
struct perf_time_interval *ptime_range;
int range_num;
};
/**
@ -590,6 +594,21 @@ static inline void auxtrace__free(struct perf_session *session)
" PERIOD[ns|us|ms|i|t]: specify period to sample stream\n" \
" concatenate multiple options. Default is ibxwpe or cewp\n"
static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
struct perf_time_interval *ptime_range,
int range_num)
{
opts->ptime_range = ptime_range;
opts->range_num = range_num;
}
static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
{
opts->ptime_range = NULL;
opts->range_num = 0;
}
#else
@ -733,6 +752,21 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
#define ITRACE_HELP ""
static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
__maybe_unused,
struct perf_time_interval *ptime_range
__maybe_unused,
int range_num __maybe_unused)
{
}
static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
__maybe_unused)
{
}
#endif
#endif


@ -739,11 +739,15 @@ int perf_config(config_fn_t fn, void *data)
if (ret < 0) {
pr_err("Error: wrong config key-value pair %s=%s\n",
key, value);
break;
/*
* Can't be just a 'break', as perf_config_set__for_each_entry()
* expands to two nested for() loops.
*/
goto out;
}
}
}
out:
return ret;
}
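
The pitfall is easy to reproduce: when a macro hides two nested loops, a 'break' in the "body" only leaves the inner loop, and iteration quietly resumes. A minimal sketch with an illustrative macro, not the real perf_config_set__for_each_entry():

    #include <stdio.h>

    /* Stand-in for a sections-then-entries iterator: two nested for() loops. */
    #define for_each_entry(s, e)            \
            for ((s) = 0; (s) < 3; (s)++)   \
                    for ((e) = 0; (e) < 3; (e)++)

    int main(void)
    {
            int s, e;

            for_each_entry(s, e) {
                    if (s == 1 && e == 1)
                            break;  /* exits the inner loop only: s == 2 still runs */
                    printf("%d.%d\n", s, e);
            }
            return 0;
    }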


@ -373,6 +373,46 @@ int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
return 0;
}
int cpu_map__get_die_id(int cpu)
{
int value, ret = cpu__get_topology_int(cpu, "die_id", &value);
return ret ?: value;
}
int cpu_map__get_die(struct cpu_map *map, int idx, void *data)
{
int cpu, die_id, s;
if (idx > map->nr)
return -1;
cpu = map->map[idx];
die_id = cpu_map__get_die_id(cpu);
/* There is no die_id on legacy systems. */
if (die_id == -1)
die_id = 0;
s = cpu_map__get_socket(map, idx, data);
if (s == -1)
return -1;
/*
* Encode socket in bit range 15:8
* die_id is relative to socket, and
* we need a global id. So we combine
* socket + die id
*/
if (WARN_ONCE(die_id >> 8, "The die id number is too big.\n"))
return -1;
if (WARN_ONCE(s >> 8, "The socket id number is too big.\n"))
return -1;
return (s << 8) | (die_id & 0xff);
}
int cpu_map__get_core_id(int cpu)
{
int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
@ -381,7 +421,7 @@ int cpu_map__get_core_id(int cpu)
int cpu_map__get_core(struct cpu_map *map, int idx, void *data)
{
int cpu, s;
int cpu, s_die;
if (idx > map->nr)
return -1;
@ -390,17 +430,22 @@ int cpu_map__get_core(struct cpu_map *map, int idx, void *data)
cpu = cpu_map__get_core_id(cpu);
s = cpu_map__get_socket(map, idx, data);
if (s == -1)
/* s_die is the combination of socket + die id */
s_die = cpu_map__get_die(map, idx, data);
if (s_die == -1)
return -1;
/*
* encode socket in upper 16 bits
* core_id is relative to socket, and
* encode socket in bit range 31:24
* encode die id in bit range 23:16
* core_id is relative to socket and die,
* we need a global id. So we combine
* socket+ core id
* socket + die id + core id
*/
return (s << 16) | (cpu & 0xffff);
if (WARN_ONCE(cpu >> 16, "The core id number is too big.\n"))
return -1;
return (s_die << 16) | (cpu & 0xffff);
}
int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
@ -408,6 +453,11 @@ int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
}
int cpu_map__build_die_map(struct cpu_map *cpus, struct cpu_map **diep)
{
return cpu_map__build_map(cpus, diep, cpu_map__get_die, NULL);
}
int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
{
return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
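
Putting the two encoders together, a per-core aggregation id now carries the socket in bits 31:24, the die in bits 23:16 and the core in bits 15:0. A small self-checking sketch of the layout; the decode expressions mirror cpu_map__id_to_socket() and cpu_map__id_to_die() from the header change that follows:

    #include <assert.h>
    #include <stdio.h>

    /* Combine ids the way cpu_map__get_die() and cpu_map__get_core() do. */
    static int encode_id(int socket, int die, int core)
    {
            int s_die = (socket << 8) | (die & 0xff); /* socket 15:8, die 7:0 */

            return (s_die << 16) | (core & 0xffff);
    }

    int main(void)
    {
            int id = encode_id(1, 2, 3);

            assert(id == 0x01020003);
            assert(id >> 24 == 1);              /* cpu_map__id_to_socket() */
            assert(((id >> 16) & 0xff) == 2);   /* cpu_map__id_to_die() */
            assert((id & 0xffff) == 3);         /* the core id part */
            printf("0x%08x\n", id);
            return 0;
    }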


@ -25,9 +25,12 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size);
size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
int cpu_map__get_socket_id(int cpu);
int cpu_map__get_socket(struct cpu_map *map, int idx, void *data);
int cpu_map__get_die_id(int cpu);
int cpu_map__get_die(struct cpu_map *map, int idx, void *data);
int cpu_map__get_core_id(int cpu);
int cpu_map__get_core(struct cpu_map *map, int idx, void *data);
int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
int cpu_map__build_die_map(struct cpu_map *cpus, struct cpu_map **diep);
int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep);
const struct cpu_map *cpu_map__online(void); /* thread unsafe */
@ -43,7 +46,12 @@ static inline int cpu_map__socket(struct cpu_map *sock, int s)
static inline int cpu_map__id_to_socket(int id)
{
return id >> 16;
return id >> 24;
}
static inline int cpu_map__id_to_die(int id)
{
return (id >> 16) & 0xff;
}
static inline int cpu_map__id_to_cpu(int id)


@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <sys/param.h>
#include <sys/utsname.h>
#include <inttypes.h>
#include <api/fs/fs.h>
@ -8,11 +9,14 @@
#include "util.h"
#include "env.h"
#define CORE_SIB_FMT \
"%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define DIE_SIB_FMT \
"%s/devices/system/cpu/cpu%d/topology/die_cpus_list"
#define THRD_SIB_FMT \
"%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
#define THRD_SIB_FMT_NEW \
"%s/devices/system/cpu/cpu%d/topology/core_cpus_list"
#define NODE_ONLINE_FMT \
"%s/devices/system/node/online"
#define NODE_MEMINFO_FMT \
@ -34,12 +38,12 @@ static int build_cpu_topology(struct cpu_topology *tp, int cpu)
sysfs__mountpoint(), cpu);
fp = fopen(filename, "r");
if (!fp)
goto try_threads;
goto try_dies;
sret = getline(&buf, &len, fp);
fclose(fp);
if (sret <= 0)
goto try_threads;
goto try_dies;
p = strchr(buf, '\n');
if (p)
@ -57,10 +61,45 @@ static int build_cpu_topology(struct cpu_topology *tp, int cpu)
}
ret = 0;
try_threads:
scnprintf(filename, MAXPATHLEN, THRD_SIB_FMT,
try_dies:
if (!tp->die_siblings)
goto try_threads;
scnprintf(filename, MAXPATHLEN, DIE_SIB_FMT,
sysfs__mountpoint(), cpu);
fp = fopen(filename, "r");
if (!fp)
goto try_threads;
sret = getline(&buf, &len, fp);
fclose(fp);
if (sret <= 0)
goto try_threads;
p = strchr(buf, '\n');
if (p)
*p = '\0';
for (i = 0; i < tp->die_sib; i++) {
if (!strcmp(buf, tp->die_siblings[i]))
break;
}
if (i == tp->die_sib) {
tp->die_siblings[i] = buf;
tp->die_sib++;
buf = NULL;
len = 0;
}
ret = 0;
try_threads:
scnprintf(filename, MAXPATHLEN, THRD_SIB_FMT_NEW,
sysfs__mountpoint(), cpu);
if (access(filename, F_OK) == -1) {
scnprintf(filename, MAXPATHLEN, THRD_SIB_FMT,
sysfs__mountpoint(), cpu);
}
fp = fopen(filename, "r");
if (!fp)
goto done;
@ -98,21 +137,46 @@ void cpu_topology__delete(struct cpu_topology *tp)
for (i = 0 ; i < tp->core_sib; i++)
zfree(&tp->core_siblings[i]);
if (tp->die_sib) {
for (i = 0 ; i < tp->die_sib; i++)
zfree(&tp->die_siblings[i]);
}
for (i = 0 ; i < tp->thread_sib; i++)
zfree(&tp->thread_siblings[i]);
free(tp);
}
static bool has_die_topology(void)
{
char filename[MAXPATHLEN];
struct utsname uts;
if (uname(&uts) < 0)
return false;
if (strncmp(uts.machine, "x86_64", 6))
return false;
scnprintf(filename, MAXPATHLEN, DIE_SIB_FMT,
sysfs__mountpoint(), 0);
if (access(filename, F_OK) == -1)
return false;
return true;
}
struct cpu_topology *cpu_topology__new(void)
{
struct cpu_topology *tp = NULL;
void *addr;
u32 nr, i;
u32 nr, i, nr_addr;
size_t sz;
long ncpus;
int ret = -1;
struct cpu_map *map;
bool has_die = has_die_topology();
ncpus = cpu__max_present_cpu();
@ -126,7 +190,11 @@ struct cpu_topology *cpu_topology__new(void)
nr = (u32)(ncpus & UINT_MAX);
sz = nr * sizeof(char *);
addr = calloc(1, sizeof(*tp) + 2 * sz);
if (has_die)
nr_addr = 3;
else
nr_addr = 2;
addr = calloc(1, sizeof(*tp) + nr_addr * sz);
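/* One allocation: the struct, then nr_addr arrays of nr 'char *' entries, carved up into the core/die/thread sibling tables below. */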
if (!addr)
goto out_free;
@ -134,6 +202,10 @@ struct cpu_topology *cpu_topology__new(void)
addr += sizeof(*tp);
tp->core_siblings = addr;
addr += sz;
if (has_die) {
tp->die_siblings = addr;
addr += sz;
}
tp->thread_siblings = addr;
for (i = 0; i < nr; i++) {


@ -7,8 +7,10 @@
struct cpu_topology {
u32 core_sib;
u32 die_sib;
u32 thread_sib;
char **core_siblings;
char **die_siblings;
char **thread_siblings;
};


@ -18,8 +18,6 @@
#include "intlist.h"
#include "util.h"
#define MAX_BUFFER 1024
/* use raw logging */
#ifdef CS_DEBUG_RAW
#define CS_LOG_RAW_FRAMES
@ -31,33 +29,26 @@
#endif
#endif
#define CS_ETM_INVAL_ADDR 0xdeadbeefdeadbeefUL
struct cs_etm_decoder {
void *data;
void (*packet_printer)(const char *msg);
dcd_tree_handle_t dcd_tree;
cs_etm_mem_cb_type mem_access;
ocsd_datapath_resp_t prev_return;
u32 packet_count;
u32 head;
u32 tail;
struct cs_etm_packet packet_buffer[MAX_BUFFER];
};
static u32
cs_etm_decoder__mem_access(const void *context,
const ocsd_vaddr_t address,
const ocsd_mem_space_acc_t mem_space __maybe_unused,
const u8 trace_chan_id,
const u32 req_size,
u8 *buffer)
{
struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
return decoder->mem_access(decoder->data,
address,
req_size,
buffer);
return decoder->mem_access(decoder->data, trace_chan_id,
address, req_size, buffer);
}
int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
@ -66,9 +57,10 @@ int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
{
decoder->mem_access = cb_func;
if (ocsd_dt_add_callback_mem_acc(decoder->dcd_tree, start, end,
OCSD_MEM_SPACE_ANY,
cs_etm_decoder__mem_access, decoder))
if (ocsd_dt_add_callback_trcid_mem_acc(decoder->dcd_tree, start, end,
OCSD_MEM_SPACE_ANY,
cs_etm_decoder__mem_access,
decoder))
return -1;
return 0;
@ -88,14 +80,14 @@ int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
return 0;
}
int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
int cs_etm_decoder__get_packet(struct cs_etm_packet_queue *packet_queue,
struct cs_etm_packet *packet)
{
if (!decoder || !packet)
if (!packet_queue || !packet)
return -EINVAL;
/* Nothing to do, might as well just return */
if (decoder->packet_count == 0)
if (packet_queue->packet_count == 0)
return 0;
/*
* The queueing process in function cs_etm_decoder__buffer_packet()
@ -106,11 +98,12 @@ int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
* value. Otherwise the first element of the packet queue is not
* used.
*/
decoder->head = (decoder->head + 1) & (MAX_BUFFER - 1);
packet_queue->head = (packet_queue->head + 1) &
(CS_ETM_PACKET_MAX_BUFFER - 1);
*packet = decoder->packet_buffer[decoder->head];
*packet = packet_queue->packet_buffer[packet_queue->head];
decoder->packet_count--;
packet_queue->packet_count--;
return 1;
}
@ -276,84 +269,130 @@ cs_etm_decoder__create_etm_packet_printer(struct cs_etm_trace_params *t_params,
trace_config);
}
static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
static ocsd_datapath_resp_t
cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
struct cs_etm_packet_queue *packet_queue,
const uint8_t trace_chan_id)
{
int i;
/* No timestamp packet has been received, nothing to do */
if (!packet_queue->timestamp)
return OCSD_RESP_CONT;
decoder->head = 0;
decoder->tail = 0;
decoder->packet_count = 0;
for (i = 0; i < MAX_BUFFER; i++) {
decoder->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
decoder->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[i].instr_count = 0;
decoder->packet_buffer[i].last_instr_taken_branch = false;
decoder->packet_buffer[i].last_instr_size = 0;
decoder->packet_buffer[i].last_instr_type = 0;
decoder->packet_buffer[i].last_instr_subtype = 0;
decoder->packet_buffer[i].last_instr_cond = 0;
decoder->packet_buffer[i].flags = 0;
decoder->packet_buffer[i].exception_number = UINT32_MAX;
decoder->packet_buffer[i].trace_chan_id = UINT8_MAX;
decoder->packet_buffer[i].cpu = INT_MIN;
}
packet_queue->timestamp = packet_queue->next_timestamp;
/* Estimate the timestamp for the next range packet */
packet_queue->next_timestamp += packet_queue->instr_count;
packet_queue->instr_count = 0;
/* Tell the front end which traceid_queue needs attention */
cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);
return OCSD_RESP_WAIT;
}
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
{
struct cs_etm_packet_queue *packet_queue;
/* First get the packet queue for this traceID */
packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
if (!packet_queue)
return OCSD_RESP_FATAL_SYS_ERR;
/*
* We've seen a timestamp packet before - simply record the new value.
* Function do_soft_timestamp() will report the value to the front end,
* hence asking the decoder to keep decoding rather than stopping.
*/
if (packet_queue->timestamp) {
packet_queue->next_timestamp = elem->timestamp;
return OCSD_RESP_CONT;
}
/*
* This is the first timestamp we've seen since the beginning of traces
* or a discontinuity. Since timestamp packets are generated *after*
* range packets have been generated, we need to estimate the time at
* which instructions started by subtracting the number of instructions
* executed from the timestamp.
*/
packet_queue->timestamp = elem->timestamp - packet_queue->instr_count;
packet_queue->next_timestamp = elem->timestamp;
packet_queue->instr_count = 0;
/* Tell the front end which traceid_queue needs attention */
cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);
/* Halt processing until we are being told to proceed */
return OCSD_RESP_WAIT;
}
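A worked example of the two helpers (numbers are illustrative): suppose 100 instructions have been queued when the first TIMESTAMP element arrives with value 5000. do_hard_timestamp() back-dates the start of those instructions to 5000 - 100 = 4900, records next_timestamp = 5000 and clears instr_count. If 250 more instructions are queued before the next TIMESTAMP element, do_soft_timestamp() advances timestamp to 5000 and estimates next_timestamp = 5000 + 250 = 5250, on the assumption that one executed instruction roughly equals one unit of the timestamp clock.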
static void
cs_etm_decoder__reset_timestamp(struct cs_etm_packet_queue *packet_queue)
{
packet_queue->timestamp = 0;
packet_queue->next_timestamp = 0;
packet_queue->instr_count = 0;
}
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_packet(struct cs_etm_packet_queue *packet_queue,
const u8 trace_chan_id,
enum cs_etm_sample_type sample_type)
{
u32 et = 0;
int cpu;
-	if (decoder->packet_count >= MAX_BUFFER - 1)
+	if (packet_queue->packet_count >= CS_ETM_PACKET_MAX_BUFFER - 1)
return OCSD_RESP_FATAL_SYS_ERR;
if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
return OCSD_RESP_FATAL_SYS_ERR;
-	et = decoder->tail;
-	et = (et + 1) & (MAX_BUFFER - 1);
-	decoder->tail = et;
-	decoder->packet_count++;
+	et = packet_queue->tail;
+	et = (et + 1) & (CS_ETM_PACKET_MAX_BUFFER - 1);
+	packet_queue->tail = et;
+	packet_queue->packet_count++;
-	decoder->packet_buffer[et].sample_type = sample_type;
-	decoder->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
-	decoder->packet_buffer[et].cpu = cpu;
-	decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
-	decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
-	decoder->packet_buffer[et].instr_count = 0;
-	decoder->packet_buffer[et].last_instr_taken_branch = false;
-	decoder->packet_buffer[et].last_instr_size = 0;
-	decoder->packet_buffer[et].last_instr_type = 0;
-	decoder->packet_buffer[et].last_instr_subtype = 0;
-	decoder->packet_buffer[et].last_instr_cond = 0;
-	decoder->packet_buffer[et].flags = 0;
-	decoder->packet_buffer[et].exception_number = UINT32_MAX;
-	decoder->packet_buffer[et].trace_chan_id = trace_chan_id;
+	packet_queue->packet_buffer[et].sample_type = sample_type;
+	packet_queue->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
+	packet_queue->packet_buffer[et].cpu = cpu;
+	packet_queue->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
+	packet_queue->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
+	packet_queue->packet_buffer[et].instr_count = 0;
+	packet_queue->packet_buffer[et].last_instr_taken_branch = false;
+	packet_queue->packet_buffer[et].last_instr_size = 0;
+	packet_queue->packet_buffer[et].last_instr_type = 0;
+	packet_queue->packet_buffer[et].last_instr_subtype = 0;
+	packet_queue->packet_buffer[et].last_instr_cond = 0;
+	packet_queue->packet_buffer[et].flags = 0;
+	packet_queue->packet_buffer[et].exception_number = UINT32_MAX;
+	packet_queue->packet_buffer[et].trace_chan_id = trace_chan_id;
-	if (decoder->packet_count == MAX_BUFFER - 1)
+	if (packet_queue->packet_count == CS_ETM_PACKET_MAX_BUFFER - 1)
return OCSD_RESP_WAIT;
return OCSD_RESP_CONT;
}
static ocsd_datapath_resp_t
-cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
+cs_etm_decoder__buffer_range(struct cs_etm_queue *etmq,
+			     struct cs_etm_packet_queue *packet_queue,
const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
{
int ret = 0;
struct cs_etm_packet *packet;
-	ret = cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+	ret = cs_etm_decoder__buffer_packet(packet_queue, trace_chan_id,
CS_ETM_RANGE);
if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
return ret;
-	packet = &decoder->packet_buffer[decoder->tail];
+	packet = &packet_queue->packet_buffer[packet_queue->tail];
switch (elem->isa) {
case ocsd_isa_aarch64:
@ -396,43 +435,90 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
packet->last_instr_size = elem->last_instr_sz;
/* per-thread scenario, no need to generate a timestamp */
if (cs_etm__etmq_is_timeless(etmq))
goto out;
/*
* The packet queue is full and we haven't seen a timestamp (had we
* seen one the packet queue wouldn't be full). Let the front end
* deal with it.
*/
if (ret == OCSD_RESP_WAIT)
goto out;
packet_queue->instr_count += elem->num_instr_range;
/* Tell the front end we have a new timestamp to process */
ret = cs_etm_decoder__do_soft_timestamp(etmq, packet_queue,
trace_chan_id);
out:
return ret;
}
static ocsd_datapath_resp_t
-cs_etm_decoder__buffer_discontinuity(struct cs_etm_decoder *decoder,
-				     const uint8_t trace_chan_id)
+cs_etm_decoder__buffer_discontinuity(struct cs_etm_packet_queue *queue,
+				     const uint8_t trace_chan_id)
{
-	return cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+	/*
+	 * Something happened and who knows when we'll get new traces so
+	 * reset time statistics.
+	 */
+	cs_etm_decoder__reset_timestamp(queue);
+	return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
CS_ETM_DISCONTINUITY);
}
static ocsd_datapath_resp_t
-cs_etm_decoder__buffer_exception(struct cs_etm_decoder *decoder,
+cs_etm_decoder__buffer_exception(struct cs_etm_packet_queue *queue,
const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
{
	int ret = 0;
struct cs_etm_packet *packet;
-	ret = cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+	ret = cs_etm_decoder__buffer_packet(queue, trace_chan_id,
CS_ETM_EXCEPTION);
if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
return ret;
-	packet = &decoder->packet_buffer[decoder->tail];
+	packet = &queue->packet_buffer[queue->tail];
packet->exception_number = elem->exception_number;
return ret;
}
static ocsd_datapath_resp_t
-cs_etm_decoder__buffer_exception_ret(struct cs_etm_decoder *decoder,
+cs_etm_decoder__buffer_exception_ret(struct cs_etm_packet_queue *queue,
const uint8_t trace_chan_id)
{
-	return cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+	return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
CS_ETM_EXCEPTION_RET);
}
static ocsd_datapath_resp_t
cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
struct cs_etm_packet_queue *packet_queue,
const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
{
pid_t tid;
/* Ignore PE_CONTEXT packets that don't have a valid contextID */
if (!elem->context.ctxt_id_valid)
return OCSD_RESP_CONT;
tid = elem->context.context_id;
if (cs_etm__etmq_set_tid(etmq, tid, trace_chan_id))
return OCSD_RESP_FATAL_SYS_ERR;
/*
* A timestamp is generated after a PE_CONTEXT element so make sure
* to rely on that coming one.
*/
cs_etm_decoder__reset_timestamp(packet_queue);
return OCSD_RESP_CONT;
}
static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
const void *context,
const ocsd_trc_index_t indx __maybe_unused,
@ -441,6 +527,13 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
{
ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
struct cs_etm_queue *etmq = decoder->data;
struct cs_etm_packet_queue *packet_queue;
/* First get the packet queue for this traceID */
packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
if (!packet_queue)
return OCSD_RESP_FATAL_SYS_ERR;
switch (elem->elem_type) {
case OCSD_GEN_TRC_ELEM_UNKNOWN:
@ -448,24 +541,30 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
case OCSD_GEN_TRC_ELEM_EO_TRACE:
case OCSD_GEN_TRC_ELEM_NO_SYNC:
case OCSD_GEN_TRC_ELEM_TRACE_ON:
-		resp = cs_etm_decoder__buffer_discontinuity(decoder,
+		resp = cs_etm_decoder__buffer_discontinuity(packet_queue,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
-		resp = cs_etm_decoder__buffer_range(decoder, elem,
+		resp = cs_etm_decoder__buffer_range(etmq, packet_queue, elem,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION:
-		resp = cs_etm_decoder__buffer_exception(decoder, elem,
+		resp = cs_etm_decoder__buffer_exception(packet_queue, elem,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
-		resp = cs_etm_decoder__buffer_exception_ret(decoder,
+		resp = cs_etm_decoder__buffer_exception_ret(packet_queue,
trace_chan_id);
break;
-	case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
-	case OCSD_GEN_TRC_ELEM_ADDR_NACC:
case OCSD_GEN_TRC_ELEM_TIMESTAMP:
resp = cs_etm_decoder__do_hard_timestamp(etmq, elem,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
resp = cs_etm_decoder__set_tid(etmq, packet_queue,
elem, trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_ADDR_NACC:
case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
case OCSD_GEN_TRC_ELEM_ADDR_UNKNOWN:
case OCSD_GEN_TRC_ELEM_EVENT:
@ -554,7 +653,6 @@ cs_etm_decoder__new(int num_cpu, struct cs_etm_decoder_params *d_params,
decoder->data = d_params->data;
decoder->prev_return = OCSD_RESP_CONT;
-	cs_etm_decoder__clear_buffer(decoder);
format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
OCSD_TRC_SRC_SINGLE);
flags = 0;
@ -577,7 +675,7 @@ cs_etm_decoder__new(int num_cpu, struct cs_etm_decoder_params *d_params,
/* init library print logging support */
ret = cs_etm_decoder__init_def_logger_printing(d_params, decoder);
if (ret != 0)
-		goto err_free_decoder_tree;
+		goto err_free_decoder;
/* init raw frame logging if required */
cs_etm_decoder__init_raw_frame_logging(d_params, decoder);
@ -587,15 +685,13 @@ cs_etm_decoder__new(int num_cpu, struct cs_etm_decoder_params *d_params,
&t_params[i],
decoder);
if (ret != 0)
-			goto err_free_decoder_tree;
+			goto err_free_decoder;
}
return decoder;
-err_free_decoder_tree:
-	ocsd_destroy_dcd_tree(decoder->dcd_tree);
err_free_decoder:
-	free(decoder);
+	cs_etm_decoder__free(decoder);
return NULL;
}


@ -14,43 +14,12 @@
#include <stdio.h>
struct cs_etm_decoder;
-enum cs_etm_sample_type {
-	CS_ETM_EMPTY,
-	CS_ETM_RANGE,
-	CS_ETM_DISCONTINUITY,
-	CS_ETM_EXCEPTION,
-	CS_ETM_EXCEPTION_RET,
-};
-enum cs_etm_isa {
-	CS_ETM_ISA_UNKNOWN,
-	CS_ETM_ISA_A64,
-	CS_ETM_ISA_A32,
-	CS_ETM_ISA_T32,
-};
-struct cs_etm_packet {
-	enum cs_etm_sample_type sample_type;
-	enum cs_etm_isa isa;
-	u64 start_addr;
-	u64 end_addr;
-	u32 instr_count;
-	u32 last_instr_type;
-	u32 last_instr_subtype;
-	u32 flags;
-	u32 exception_number;
-	u8 last_instr_cond;
-	u8 last_instr_taken_branch;
-	u8 last_instr_size;
-	u8 trace_chan_id;
-	int cpu;
-};
+struct cs_etm_packet;
+struct cs_etm_packet_queue;
struct cs_etm_queue;
-typedef u32 (*cs_etm_mem_cb_type)(struct cs_etm_queue *, u64,
-				  size_t, u8 *);
+typedef u32 (*cs_etm_mem_cb_type)(struct cs_etm_queue *, u8, u64, size_t, u8 *);
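For illustration, a hypothetical callback matching the new typedef — the extra u8 argument is the trace channel ID that cs_etm_decoder__mem_access() now forwards. The flat-snapshot scheme and every name here are made up for the sketch, not taken from the perf sources; it assumes perf's linux type headers and <string.h>:

	static u32 snapshot_mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
				       u64 address, size_t req_size, u8 *buffer)
	{
		/* Hypothetical flat image of traced memory */
		extern u8 *snap_data;
		extern u64 snap_base;
		extern size_t snap_size;

		(void)etmq;
		(void)trace_chan_id;

		if (address < snap_base || address + req_size > snap_base + snap_size)
			return 0;	/* short read tells the decoder the address is inaccessible */

		memcpy(buffer, snap_data + (address - snap_base), req_size);
		return req_size;
	}

Such a callback would be registered with cs_etm_decoder__add_mem_access_cb() for the address range it can serve.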
struct cs_etmv3_trace_params {
u32 reg_ctrl;
@ -119,7 +88,7 @@ int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
u64 start, u64 end,
cs_etm_mem_cb_type cb_func);
int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
int cs_etm_decoder__get_packet(struct cs_etm_packet_queue *packet_queue,
struct cs_etm_packet *packet);
int cs_etm_decoder__reset(struct cs_etm_decoder *decoder);



@ -9,6 +9,7 @@
#include "util/event.h"
#include "util/session.h"
#include <linux/bits.h>
/* Versioning header in case things need to change in the future. That way
 * decoding of old snapshots is still possible.
@ -97,12 +98,72 @@ enum {
CS_ETMV4_EXC_END = 31,
};
enum cs_etm_sample_type {
CS_ETM_EMPTY,
CS_ETM_RANGE,
CS_ETM_DISCONTINUITY,
CS_ETM_EXCEPTION,
CS_ETM_EXCEPTION_RET,
};
enum cs_etm_isa {
CS_ETM_ISA_UNKNOWN,
CS_ETM_ISA_A64,
CS_ETM_ISA_A32,
CS_ETM_ISA_T32,
};
/* RB tree for quick conversion between traceID and metadata pointers */
struct intlist *traceid_list;
struct cs_etm_queue;
struct cs_etm_packet {
enum cs_etm_sample_type sample_type;
enum cs_etm_isa isa;
u64 start_addr;
u64 end_addr;
u32 instr_count;
u32 last_instr_type;
u32 last_instr_subtype;
u32 flags;
u32 exception_number;
u8 last_instr_cond;
u8 last_instr_taken_branch;
u8 last_instr_size;
u8 trace_chan_id;
int cpu;
};
#define CS_ETM_PACKET_MAX_BUFFER 1024
/*
* When working with per-thread scenarios the process under trace can
* be scheduled on any CPU and as such, more than one traceID may be
* associated with the same process. Since a traceID of '0' is illegal
* as per the CoreSight architecture, use that specific value to
* identify the queue where all packets (with any traceID) are
* aggregated.
*/
#define CS_ETM_PER_THREAD_TRACEID 0
struct cs_etm_packet_queue {
u32 packet_count;
u32 head;
u32 tail;
u32 instr_count;
u64 timestamp;
u64 next_timestamp;
struct cs_etm_packet packet_buffer[CS_ETM_PACKET_MAX_BUFFER];
};
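A minimal sketch of how a front end might drain one of these queues, assuming an etmq and a trace channel ID are at hand (per-thread sessions would pass CS_ETM_PER_THREAD_TRACEID):

	struct cs_etm_packet_queue *pq;
	struct cs_etm_packet packet;

	pq = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
	while (pq && cs_etm_decoder__get_packet(pq, &packet) == 1) {
		/* returns 1 when a packet was dequeued, 0 once the queue is empty */
		consume_packet(&packet);	/* hypothetical consumer */
	}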
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define CS_ETM_INVAL_ADDR 0xdeadbeefdeadbeefUL
#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
#define CS_ETM_HEADER_SIZE (CS_HEADER_VERSION_0_MAX * sizeof(u64))
#define __perf_cs_etmv3_magic 0x3030303030303030ULL
@ -114,6 +175,13 @@ struct intlist *traceid_list;
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
int cs_etm__get_cpu(u8 trace_chan_id, int *cpu);
int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
pid_t tid, u8 trace_chan_id);
bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq);
void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
u8 trace_chan_id);
struct cs_etm_packet_queue
*cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq, u8 trace_chan_id);
#else
static inline int
cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
@ -127,6 +195,32 @@ static inline int cs_etm__get_cpu(u8 trace_chan_id __maybe_unused,
{
return -1;
}
static inline int cs_etm__etmq_set_tid(
struct cs_etm_queue *etmq __maybe_unused,
pid_t tid __maybe_unused,
u8 trace_chan_id __maybe_unused)
{
return -1;
}
static inline bool cs_etm__etmq_is_timeless(
struct cs_etm_queue *etmq __maybe_unused)
{
/* What else to return? */
return true;
}
static inline void cs_etm__etmq_set_traceid_queue_timestamp(
struct cs_etm_queue *etmq __maybe_unused,
u8 trace_chan_id __maybe_unused) {}
static inline struct cs_etm_packet_queue *cs_etm__etmq_get_packet_queue(
struct cs_etm_queue *etmq __maybe_unused,
u8 trace_chan_id __maybe_unused)
{
return NULL;
}
#endif
#endif


@ -246,6 +246,7 @@ int perf_env__read_cpu_topology_map(struct perf_env *env)
for (cpu = 0; cpu < nr_cpus; ++cpu) {
env->cpu[cpu].core_id = cpu_map__get_core_id(cpu);
env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu);
env->cpu[cpu].die_id = cpu_map__get_die_id(cpu);
}
env->nr_cpus_avail = nr_cpus;


@ -9,6 +9,7 @@
struct cpu_topology_map {
int socket_id;
int die_id;
int core_id;
};
@ -49,6 +50,7 @@ struct perf_env {
int nr_cmdline;
int nr_sibling_cores;
int nr_sibling_dies;
int nr_sibling_threads;
int nr_numa_nodes;
int nr_memory_nodes;
@ -57,6 +59,7 @@ struct perf_env {
char *cmdline;
const char **cmdline_argv;
char *sibling_cores;
char *sibling_dies;
char *sibling_threads;
char *pmu_mappings;
struct cpu_topology_map *cpu;


@ -204,6 +204,8 @@ struct perf_sample {
u64 period;
u64 weight;
u64 transaction;
u64 insn_cnt;
u64 cyc_cnt;
u32 cpu;
u32 raw_size;
u64 data_src;


@ -679,6 +679,10 @@ static void __perf_evsel__config_callchain(struct perf_evsel *evsel,
attr->sample_max_stack = param->max_stack;
if (opts->kernel_callchains)
attr->exclude_callchain_user = 1;
if (opts->user_callchains)
attr->exclude_callchain_kernel = 1;
if (param->record_mode == CALLCHAIN_LBR) {
if (!opts->branch_stack) {
if (attr->exclude_user) {
@ -701,7 +705,14 @@ static void __perf_evsel__config_callchain(struct perf_evsel *evsel,
if (!function) {
perf_evsel__set_sample_bit(evsel, REGS_USER);
perf_evsel__set_sample_bit(evsel, STACK_USER);
-			attr->sample_regs_user |= PERF_REGS_MASK;
+			if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
+				attr->sample_regs_user |= DWARF_MINIMAL_REGS;
+				pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
+					   "specifying a subset with --user-regs may render DWARF unwinding unreliable, "
+					   "so the minimal registers set (IP, SP) is explicitly forced.\n");
+			} else {
+				attr->sample_regs_user |= PERF_REGS_MASK;
+			}
attr->sample_stack_user = param->dump_size;
attr->exclude_callchain_user = 1;
} else {
@ -1136,9 +1147,6 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
-	if (evsel->system_wide)
-		nthreads = 1;
evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
if (evsel->fd) {


@ -599,6 +599,27 @@ static int write_cpu_topology(struct feat_fd *ff,
if (ret < 0)
return ret;
}
if (!tp->die_sib)
goto done;
ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
if (ret < 0)
goto done;
for (i = 0; i < tp->die_sib; i++) {
ret = do_write_string(ff, tp->die_siblings[i]);
if (ret < 0)
goto done;
}
for (j = 0; j < perf_env.nr_cpus_avail; j++) {
ret = do_write(ff, &perf_env.cpu[j].die_id,
sizeof(perf_env.cpu[j].die_id));
if (ret < 0)
return ret;
}
done:
cpu_topology__delete(tp);
return ret;
@ -1439,10 +1460,20 @@ static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
str = ph->env.sibling_cores;
for (i = 0; i < nr; i++) {
fprintf(fp, "# sibling cores : %s\n", str);
fprintf(fp, "# sibling sockets : %s\n", str);
str += strlen(str) + 1;
}
if (ph->env.nr_sibling_dies) {
nr = ph->env.nr_sibling_dies;
str = ph->env.sibling_dies;
for (i = 0; i < nr; i++) {
fprintf(fp, "# sibling dies : %s\n", str);
str += strlen(str) + 1;
}
}
nr = ph->env.nr_sibling_threads;
str = ph->env.sibling_threads;
@ -1451,12 +1482,28 @@ static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
str += strlen(str) + 1;
}
-	if (ph->env.cpu != NULL) {
-		for (i = 0; i < cpu_nr; i++)
-			fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
-				ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
-	} else
-		fprintf(fp, "# Core ID and Socket ID information is not available\n");
+	if (ph->env.nr_sibling_dies) {
+		if (ph->env.cpu != NULL) {
+			for (i = 0; i < cpu_nr; i++)
+				fprintf(fp, "# CPU %d: Core ID %d, "
+					    "Die ID %d, Socket ID %d\n",
+					    i, ph->env.cpu[i].core_id,
+					    ph->env.cpu[i].die_id,
+					    ph->env.cpu[i].socket_id);
+		} else
+			fprintf(fp, "# Core ID, Die ID and Socket ID "
+				    "information is not available\n");
+	} else {
+		if (ph->env.cpu != NULL) {
+			for (i = 0; i < cpu_nr; i++)
+				fprintf(fp, "# CPU %d: Core ID %d, "
+					    "Socket ID %d\n",
+					    i, ph->env.cpu[i].core_id,
+					    ph->env.cpu[i].socket_id);
+		} else
+			fprintf(fp, "# Core ID and Socket ID "
+				    "information is not available\n");
+	}
}
static void print_clockid(struct feat_fd *ff, FILE *fp)
@ -2214,6 +2261,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
goto free_cpu;
ph->env.cpu[i].core_id = nr;
size += sizeof(u32);
if (do_read_u32(ff, &nr))
goto free_cpu;
@ -2225,6 +2273,40 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
}
ph->env.cpu[i].socket_id = nr;
size += sizeof(u32);
}
/*
* The header may be from old perf,
* which doesn't include die information.
*/
if (ff->size <= size)
return 0;
if (do_read_u32(ff, &nr))
return -1;
ph->env.nr_sibling_dies = nr;
size += sizeof(u32);
for (i = 0; i < nr; i++) {
str = do_read_string(ff);
if (!str)
goto error;
/* include a NULL character at the end */
if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
goto error;
size += string_size(str);
free(str);
}
ph->env.sibling_dies = strbuf_detach(&sb, NULL);
for (i = 0; i < (u32)cpu_nr; i++) {
if (do_read_u32(ff, &nr))
goto free_cpu;
ph->env.cpu[i].die_id = nr;
}
return 0;


@ -95,6 +95,7 @@ struct intel_pt_decoder {
uint64_t *insn_cnt_ptr, uint64_t *ip, uint64_t to_ip,
uint64_t max_insn_cnt, void *data);
bool (*pgd_ip)(uint64_t ip, void *data);
int (*lookahead)(void *data, intel_pt_lookahead_cb_t cb, void *cb_data);
void *data;
struct intel_pt_state state;
const unsigned char *buf;
@ -107,6 +108,7 @@ struct intel_pt_decoder {
bool have_cyc;
bool fixup_last_mtc;
bool have_last_ip;
bool in_psb;
enum intel_pt_param_flags flags;
uint64_t pos;
uint64_t last_ip;
@ -115,6 +117,7 @@ struct intel_pt_decoder {
uint64_t timestamp;
uint64_t tsc_timestamp;
uint64_t ref_timestamp;
uint64_t buf_timestamp;
uint64_t sample_timestamp;
uint64_t ret_addr;
uint64_t ctc_timestamp;
@ -151,6 +154,11 @@ struct intel_pt_decoder {
uint64_t period_mask;
uint64_t period_ticks;
uint64_t last_masked_timestamp;
uint64_t tot_cyc_cnt;
uint64_t sample_tot_cyc_cnt;
uint64_t base_cyc_cnt;
uint64_t cyc_cnt_timestamp;
double tsc_to_cyc;
bool continuous_period;
bool overflow;
bool set_fup_tx_flags;
@ -158,6 +166,7 @@ struct intel_pt_decoder {
bool set_fup_mwait;
bool set_fup_pwre;
bool set_fup_exstop;
bool sample_cyc;
unsigned int fup_tx_flags;
unsigned int tx_flags;
uint64_t fup_ptw_payload;
@ -217,6 +226,7 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
decoder->get_trace = params->get_trace;
decoder->walk_insn = params->walk_insn;
decoder->pgd_ip = params->pgd_ip;
decoder->lookahead = params->lookahead;
decoder->data = params->data;
decoder->return_compression = params->return_compression;
decoder->branch_enable = params->branch_enable;
@ -470,7 +480,21 @@ static int intel_pt_bad_packet(struct intel_pt_decoder *decoder)
return -EBADMSG;
}
-static int intel_pt_get_data(struct intel_pt_decoder *decoder)
static inline void intel_pt_update_sample_time(struct intel_pt_decoder *decoder)
{
decoder->sample_timestamp = decoder->timestamp;
decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
}
static void intel_pt_reposition(struct intel_pt_decoder *decoder)
{
decoder->ip = 0;
decoder->pkt_state = INTEL_PT_STATE_NO_PSB;
decoder->timestamp = 0;
decoder->have_tma = false;
}
static int intel_pt_get_data(struct intel_pt_decoder *decoder, bool reposition)
{
struct intel_pt_buffer buffer = { .buf = 0, };
int ret;
@ -487,12 +511,10 @@ static int intel_pt_get_data(struct intel_pt_decoder *decoder)
intel_pt_log("No more data\n");
return -ENODATA;
}
-	if (!buffer.consecutive) {
-		decoder->ip = 0;
-		decoder->pkt_state = INTEL_PT_STATE_NO_PSB;
+	decoder->buf_timestamp = buffer.ref_timestamp;
+	if (!buffer.consecutive || reposition) {
+		intel_pt_reposition(decoder);
		decoder->ref_timestamp = buffer.ref_timestamp;
-		decoder->timestamp = 0;
-		decoder->have_tma = false;
decoder->state.trace_nr = buffer.trace_nr;
intel_pt_log("Reference timestamp 0x%" PRIx64 "\n",
decoder->ref_timestamp);
@ -502,10 +524,11 @@ static int intel_pt_get_data(struct intel_pt_decoder *decoder)
return 0;
}
-static int intel_pt_get_next_data(struct intel_pt_decoder *decoder)
+static int intel_pt_get_next_data(struct intel_pt_decoder *decoder,
+				  bool reposition)
{
if (!decoder->next_buf)
-		return intel_pt_get_data(decoder);
+		return intel_pt_get_data(decoder, reposition);
decoder->buf = decoder->next_buf;
decoder->len = decoder->next_len;
@ -524,7 +547,7 @@ static int intel_pt_get_split_packet(struct intel_pt_decoder *decoder)
len = decoder->len;
memcpy(buf, decoder->buf, len);
-	ret = intel_pt_get_data(decoder);
+	ret = intel_pt_get_data(decoder, false);
if (ret) {
decoder->pos += old_len;
return ret < 0 ? ret : -EINVAL;
@ -850,7 +873,7 @@ static int intel_pt_get_next_packet(struct intel_pt_decoder *decoder)
decoder->len -= decoder->pkt_step;
if (!decoder->len) {
-		ret = intel_pt_get_next_data(decoder);
+		ret = intel_pt_get_next_data(decoder, false);
if (ret)
return ret;
}
@ -1308,10 +1331,10 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
decoder->ip += intel_pt_insn.length;
return 0;
}
decoder->sample_cyc = false;
decoder->ip += intel_pt_insn.length;
if (!decoder->tnt.count) {
-		decoder->sample_timestamp = decoder->timestamp;
-		decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+		intel_pt_update_sample_time(decoder);
return -EAGAIN;
}
decoder->tnt.payload <<= 1;
@ -1345,6 +1368,21 @@ static int intel_pt_mode_tsx(struct intel_pt_decoder *decoder, bool *no_tip)
return 0;
}
static uint64_t intel_pt_8b_tsc(uint64_t timestamp, uint64_t ref_timestamp)
{
timestamp |= (ref_timestamp & (0xffULL << 56));
if (timestamp < ref_timestamp) {
if (ref_timestamp - timestamp > (1ULL << 55))
timestamp += (1ULL << 56);
} else {
if (timestamp - ref_timestamp > (1ULL << 55))
timestamp -= (1ULL << 56);
}
return timestamp;
}
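A quick sanity check of the wrap handling (a standalone sketch, assuming the static helper is made visible to the test; the values are illustrative):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* Reference TSC just after a 56-bit payload rollover */
		uint64_t ref = 0x1200000000000010ULL;
		/* 7-byte payload from just before the rollover */
		uint64_t payload = 0x00FFFFFFFFFFFFF0ULL;

		/* A plain OR would yield 0x12FFFFFFFFFFFFF0, nearly 2^56 in
		 * the future; the +/- 2^55 window pulls it back one period. */
		assert(intel_pt_8b_tsc(payload, ref) == 0x11FFFFFFFFFFFFF0ULL);
		return 0;
	}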
static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder)
{
uint64_t timestamp;
@ -1352,15 +1390,8 @@ static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder)
decoder->have_tma = false;
if (decoder->ref_timestamp) {
-		timestamp = decoder->packet.payload |
-			    (decoder->ref_timestamp & (0xffULL << 56));
-		if (timestamp < decoder->ref_timestamp) {
-			if (decoder->ref_timestamp - timestamp > (1ULL << 55))
-				timestamp += (1ULL << 56);
-		} else {
-			if (timestamp - decoder->ref_timestamp > (1ULL << 55))
-				timestamp -= (1ULL << 56);
-		}
+		timestamp = intel_pt_8b_tsc(decoder->packet.payload,
+					    decoder->ref_timestamp);
decoder->tsc_timestamp = timestamp;
decoder->timestamp = timestamp;
decoder->ref_timestamp = 0;
@ -1404,6 +1435,42 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
return -EOVERFLOW;
}
static inline void intel_pt_mtc_cyc_cnt_pge(struct intel_pt_decoder *decoder)
{
if (decoder->have_cyc)
return;
decoder->cyc_cnt_timestamp = decoder->timestamp;
decoder->base_cyc_cnt = decoder->tot_cyc_cnt;
}
static inline void intel_pt_mtc_cyc_cnt_cbr(struct intel_pt_decoder *decoder)
{
decoder->tsc_to_cyc = decoder->cbr / decoder->max_non_turbo_ratio_fp;
if (decoder->pge)
intel_pt_mtc_cyc_cnt_pge(decoder);
}
static inline void intel_pt_mtc_cyc_cnt_upd(struct intel_pt_decoder *decoder)
{
uint64_t tot_cyc_cnt, tsc_delta;
if (decoder->have_cyc)
return;
decoder->sample_cyc = true;
if (!decoder->pge || decoder->timestamp <= decoder->cyc_cnt_timestamp)
return;
tsc_delta = decoder->timestamp - decoder->cyc_cnt_timestamp;
tot_cyc_cnt = tsc_delta * decoder->tsc_to_cyc + decoder->base_cyc_cnt;
if (tot_cyc_cnt > decoder->tot_cyc_cnt)
decoder->tot_cyc_cnt = tot_cyc_cnt;
}
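A worked example of the estimate (illustrative numbers): with max_non_turbo_ratio 24 (a 2.4 GHz part on a 100 MHz base clock) and CBR 36, intel_pt_mtc_cyc_cnt_cbr() sets tsc_to_cyc = 36 / 24 = 1.5; if a later timing packet shows the TSC 1000 ticks past cyc_cnt_timestamp, intel_pt_mtc_cyc_cnt_upd() estimates tot_cyc_cnt = 1000 * 1.5 + base_cyc_cnt, i.e. roughly 1500 core cycles over that stretch. Real CYC packets, when present, take precedence (the have_cyc early return).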
static void intel_pt_calc_tma(struct intel_pt_decoder *decoder)
{
uint32_t ctc = decoder->packet.payload;
@ -1413,6 +1480,11 @@ static void intel_pt_calc_tma(struct intel_pt_decoder *decoder)
if (!decoder->tsc_ctc_ratio_d)
return;
if (decoder->pge && !decoder->in_psb)
intel_pt_mtc_cyc_cnt_pge(decoder);
else
intel_pt_mtc_cyc_cnt_upd(decoder);
decoder->last_mtc = (ctc >> decoder->mtc_shift) & 0xff;
decoder->ctc_timestamp = decoder->tsc_timestamp - fc;
if (decoder->tsc_ctc_mult) {
@ -1468,6 +1540,8 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
else
decoder->timestamp = timestamp;
intel_pt_mtc_cyc_cnt_upd(decoder);
decoder->timestamp_insn_cnt = 0;
decoder->last_mtc = mtc;
@ -1492,6 +1566,8 @@ static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
decoder->cbr = cbr;
decoder->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
intel_pt_mtc_cyc_cnt_cbr(decoder);
}
static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
@ -1501,6 +1577,9 @@ static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
decoder->have_cyc = true;
decoder->cycle_cnt += decoder->packet.payload;
if (decoder->pge)
decoder->tot_cyc_cnt += decoder->packet.payload;
decoder->sample_cyc = true;
if (!decoder->cyc_ref_timestamp)
return;
@ -1528,14 +1607,17 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
{
int err;
decoder->in_psb = true;
while (1) {
err = intel_pt_get_next_packet(decoder);
if (err)
-			return err;
+			goto out;
switch (decoder->packet.type) {
case INTEL_PT_PSBEND:
-			return 0;
+			err = 0;
+			goto out;
case INTEL_PT_TIP_PGD:
case INTEL_PT_TIP_PGE:
@ -1553,10 +1635,12 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
case INTEL_PT_PWRX:
decoder->have_tma = false;
intel_pt_log("ERROR: Unexpected packet\n");
-			return -EAGAIN;
+			err = -EAGAIN;
+			goto out;
case INTEL_PT_OVF:
-			return intel_pt_overflow(decoder);
+			err = intel_pt_overflow(decoder);
+			goto out;
case INTEL_PT_TSC:
intel_pt_calc_tsc_timestamp(decoder);
@ -1602,6 +1686,10 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
break;
}
}
out:
decoder->in_psb = false;
return err;
}
static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
@ -1675,6 +1763,7 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
decoder->state.to_ip = decoder->ip;
}
decoder->state.type |= INTEL_PT_TRACE_BEGIN;
intel_pt_mtc_cyc_cnt_pge(decoder);
return 0;
case INTEL_PT_TIP:
@ -1745,6 +1834,7 @@ next:
case INTEL_PT_TIP_PGE: {
decoder->pge = true;
intel_pt_mtc_cyc_cnt_pge(decoder);
if (decoder->packet.count == 0) {
intel_pt_log_at("Skipping zero TIP.PGE",
decoder->pos);
@ -1975,10 +2065,12 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
{
int err;
decoder->in_psb = true;
while (1) {
err = intel_pt_get_next_packet(decoder);
if (err)
-			return err;
+			goto out;
switch (decoder->packet.type) {
case INTEL_PT_TIP_PGD:
@ -1994,7 +2086,8 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
intel_pt_log("ERROR: Unexpected packet\n");
-			return -ENOENT;
+			err = -ENOENT;
+			goto out;
case INTEL_PT_FUP:
decoder->pge = true;
@ -2053,16 +2146,20 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
decoder->pkt_state = INTEL_PT_STATE_ERR4;
else
decoder->pkt_state = INTEL_PT_STATE_ERR3;
-			return -ENOENT;
+			err = -ENOENT;
+			goto out;
case INTEL_PT_BAD: /* Does not happen */
-			return intel_pt_bug(decoder);
+			err = intel_pt_bug(decoder);
+			goto out;
case INTEL_PT_OVF:
-			return intel_pt_overflow(decoder);
+			err = intel_pt_overflow(decoder);
+			goto out;
case INTEL_PT_PSBEND:
-			return 0;
+			err = 0;
+			goto out;
case INTEL_PT_PSB:
case INTEL_PT_VMCS:
@ -2072,6 +2169,10 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
break;
}
}
out:
decoder->in_psb = false;
return err;
}
static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
@ -2086,18 +2187,30 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
switch (decoder->packet.type) {
case INTEL_PT_TIP_PGD:
decoder->continuous_period = false;
-		__fallthrough;
-	case INTEL_PT_TIP_PGE:
-	case INTEL_PT_TIP:
-		decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD;
+		decoder->pge = false;
if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
if (!decoder->ip)
break;
decoder->state.type |= INTEL_PT_TRACE_END;
return 0;
case INTEL_PT_TIP_PGE:
decoder->pge = true;
intel_pt_mtc_cyc_cnt_pge(decoder);
if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
if (!decoder->ip)
break;
decoder->state.type |= INTEL_PT_TRACE_BEGIN;
return 0;
case INTEL_PT_TIP:
decoder->pge = true;
if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
if (!decoder->ip)
break;
-		if (decoder->packet.type == INTEL_PT_TIP_PGE)
-			decoder->state.type |= INTEL_PT_TRACE_BEGIN;
-		if (decoder->packet.type == INTEL_PT_TIP_PGD)
-			decoder->state.type |= INTEL_PT_TRACE_END;
return 0;
case INTEL_PT_FUP:
@ -2250,7 +2363,7 @@ static int intel_pt_get_split_psb(struct intel_pt_decoder *decoder,
decoder->pos += decoder->len;
decoder->len = 0;
-	ret = intel_pt_get_next_data(decoder);
+	ret = intel_pt_get_next_data(decoder, false);
if (ret)
return ret;
@ -2276,7 +2389,7 @@ static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder)
intel_pt_log("Scanning for PSB\n");
while (1) {
if (!decoder->len) {
-			ret = intel_pt_get_next_data(decoder);
+			ret = intel_pt_get_next_data(decoder, false);
if (ret)
return ret;
}
@ -2404,8 +2517,8 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
if (err) {
decoder->state.err = intel_pt_ext_err(err);
decoder->state.from_ip = decoder->ip;
-		decoder->sample_timestamp = decoder->timestamp;
-		decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+		intel_pt_update_sample_time(decoder);
+		decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
} else {
decoder->state.err = 0;
if (decoder->cbr != decoder->cbr_seen && decoder->state.type) {
@ -2414,8 +2527,9 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
decoder->state.cbr_payload = decoder->cbr_payload;
}
if (intel_pt_sample_time(decoder->pkt_state)) {
-			decoder->sample_timestamp = decoder->timestamp;
-			decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+			intel_pt_update_sample_time(decoder);
+			if (decoder->sample_cyc)
+				decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
}
}
@ -2423,6 +2537,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
decoder->state.cr3 = decoder->cr3;
decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
decoder->state.tot_cyc_cnt = decoder->sample_tot_cyc_cnt;
return &decoder->state;
}
@ -2732,3 +2847,131 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
return buf_b; /* No overlap */
}
}
/**
* struct fast_forward_data - data used by intel_pt_ff_cb().
* @timestamp: timestamp to fast forward towards
* @buf_timestamp: buffer timestamp of last buffer with trace data earlier than
* the fast forward timestamp.
*/
struct fast_forward_data {
uint64_t timestamp;
uint64_t buf_timestamp;
};
/**
* intel_pt_ff_cb - fast forward lookahead callback.
* @buffer: Intel PT trace buffer
* @data: opaque pointer to fast forward data (struct fast_forward_data)
*
* Determine if @buffer trace is past the fast forward timestamp.
*
* Return: 1 (stop lookahead) if @buffer trace is past the fast forward
* timestamp, and 0 otherwise.
*/
static int intel_pt_ff_cb(struct intel_pt_buffer *buffer, void *data)
{
struct fast_forward_data *d = data;
unsigned char *buf;
uint64_t tsc;
size_t rem;
size_t len;
buf = (unsigned char *)buffer->buf;
len = buffer->len;
if (!intel_pt_next_psb(&buf, &len) ||
!intel_pt_next_tsc(buf, len, &tsc, &rem))
return 0;
tsc = intel_pt_8b_tsc(tsc, buffer->ref_timestamp);
intel_pt_log("Buffer 1st timestamp " x64_fmt " ref timestamp " x64_fmt "\n",
tsc, buffer->ref_timestamp);
/*
 * If the buffer contains a timestamp earlier than the fast forward
* timestamp, then record it, else stop.
*/
if (tsc < d->timestamp)
d->buf_timestamp = buffer->ref_timestamp;
else
return 1;
return 0;
}
/**
* intel_pt_fast_forward - reposition decoder forwards.
* @decoder: Intel PT decoder
* @timestamp: timestamp to fast forward towards
*
* Reposition decoder at the last PSB with a timestamp earlier than @timestamp.
*
* Return: 0 on success or negative error code on failure.
*/
int intel_pt_fast_forward(struct intel_pt_decoder *decoder, uint64_t timestamp)
{
struct fast_forward_data d = { .timestamp = timestamp };
unsigned char *buf;
size_t len;
int err;
intel_pt_log("Fast forward towards timestamp " x64_fmt "\n", timestamp);
/* Find buffer timestamp of buffer to fast forward to */
err = decoder->lookahead(decoder->data, intel_pt_ff_cb, &d);
if (err < 0)
return err;
/* Walk to buffer with same buffer timestamp */
if (d.buf_timestamp) {
do {
decoder->pos += decoder->len;
decoder->len = 0;
err = intel_pt_get_next_data(decoder, true);
/* -ENOLINK means non-consecutive trace */
if (err && err != -ENOLINK)
return err;
} while (decoder->buf_timestamp != d.buf_timestamp);
}
if (!decoder->buf)
return 0;
buf = (unsigned char *)decoder->buf;
len = decoder->len;
if (!intel_pt_next_psb(&buf, &len))
return 0;
/*
* Walk PSBs while the PSB timestamp is less than the fast forward
* timestamp.
*/
do {
uint64_t tsc;
size_t rem;
if (!intel_pt_next_tsc(buf, len, &tsc, &rem))
break;
tsc = intel_pt_8b_tsc(tsc, decoder->buf_timestamp);
/*
* A TSC packet can slip past MTC packets but, after fast
* forward, decoding starts at the TSC timestamp. That means
* the timestamps may not be exactly the same as the timestamps
* that would have been decoded without fast forward.
*/
if (tsc < timestamp) {
intel_pt_log("Fast forward to next PSB timestamp " x64_fmt "\n", tsc);
decoder->pos += decoder->len - len;
decoder->buf = buf;
decoder->len = len;
intel_pt_reposition(decoder);
} else {
break;
}
} while (intel_pt_step_psb(&buf, &len));
return 0;
}


@ -68,6 +68,7 @@ struct intel_pt_state {
uint64_t to_ip;
uint64_t cr3;
uint64_t tot_insn_cnt;
uint64_t tot_cyc_cnt;
uint64_t timestamp;
uint64_t est_timestamp;
uint64_t trace_nr;
@ -92,12 +93,15 @@ struct intel_pt_buffer {
uint64_t trace_nr;
};
typedef int (*intel_pt_lookahead_cb_t)(struct intel_pt_buffer *, void *);
struct intel_pt_params {
int (*get_trace)(struct intel_pt_buffer *buffer, void *data);
int (*walk_insn)(struct intel_pt_insn *intel_pt_insn,
uint64_t *insn_cnt_ptr, uint64_t *ip, uint64_t to_ip,
uint64_t max_insn_cnt, void *data);
bool (*pgd_ip)(uint64_t ip, void *data);
int (*lookahead)(void *data, intel_pt_lookahead_cb_t cb, void *cb_data);
void *data;
bool return_compression;
bool branch_enable;
@ -117,6 +121,8 @@ void intel_pt_decoder_free(struct intel_pt_decoder *decoder);
const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder);
int intel_pt_fast_forward(struct intel_pt_decoder *decoder, uint64_t timestamp);
unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
unsigned char *buf_b, size_t len_b,
bool have_tsc, bool *consecutive);


@ -33,6 +33,7 @@
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"
#include "time-utils.h"
#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
@ -41,6 +42,11 @@
#define MAX_TIMESTAMP (~0ULL)
struct range {
u64 start;
u64 end;
};
struct intel_pt {
struct auxtrace auxtrace;
struct auxtrace_queues queues;
@ -109,6 +115,9 @@ struct intel_pt {
char *filter;
struct addr_filters filts;
struct range *time_ranges;
unsigned int range_cnt;
};
enum switch_state {
@ -145,9 +154,18 @@ struct intel_pt_queue {
bool have_sample;
u64 time;
u64 timestamp;
u64 sel_timestamp;
bool sel_start;
unsigned int sel_idx;
u32 flags;
u16 insn_len;
u64 last_insn_cnt;
u64 ipc_insn_cnt;
u64 ipc_cyc_cnt;
u64 last_in_insn_cnt;
u64 last_in_cyc_cnt;
u64 last_br_insn_cnt;
u64 last_br_cyc_cnt;
char insn[INTEL_PT_INSN_BUF_SZ];
};
@ -224,32 +242,13 @@ static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *
return 0;
}
-/* This function assumes data is processed sequentially only */
-static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
+static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
+			       struct auxtrace_buffer *buffer,
+			       struct auxtrace_buffer *old_buffer,
+			       struct intel_pt_buffer *b)
{
-	struct intel_pt_queue *ptq = data;
-	struct auxtrace_buffer *buffer = ptq->buffer;
-	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
-	struct auxtrace_queue *queue;
+	bool might_overlap;
-	if (ptq->stop) {
-		b->len = 0;
-		return 0;
-	}
-	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
-	buffer = auxtrace_buffer__next(queue, buffer);
-	if (!buffer) {
-		if (old_buffer)
-			auxtrace_buffer__drop_data(old_buffer);
-		b->len = 0;
-		return 0;
-	}
-	ptq->buffer = buffer;
if (!buffer->data) {
int fd = perf_data__fd(ptq->pt->session->data);
@ -279,6 +278,95 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
b->consecutive = true;
}
return 0;
}
/* Do not drop buffers with references - refer intel_pt_get_trace() */
static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
struct auxtrace_buffer *buffer)
{
if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
return;
auxtrace_buffer__drop_data(buffer);
}
/* Must be serialized with respect to intel_pt_get_trace() */
static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
void *cb_data)
{
struct intel_pt_queue *ptq = data;
struct auxtrace_buffer *buffer = ptq->buffer;
struct auxtrace_buffer *old_buffer = ptq->old_buffer;
struct auxtrace_queue *queue;
int err = 0;
queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
while (1) {
struct intel_pt_buffer b = { .len = 0 };
buffer = auxtrace_buffer__next(queue, buffer);
if (!buffer)
break;
err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
if (err)
break;
if (b.len) {
intel_pt_lookahead_drop_buffer(ptq, old_buffer);
old_buffer = buffer;
} else {
intel_pt_lookahead_drop_buffer(ptq, buffer);
continue;
}
err = cb(&b, cb_data);
if (err)
break;
}
if (buffer != old_buffer)
intel_pt_lookahead_drop_buffer(ptq, buffer);
intel_pt_lookahead_drop_buffer(ptq, old_buffer);
return err;
}
/*
* This function assumes data is processed sequentially only.
* Must be serialized with respect to intel_pt_lookahead()
*/
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
struct intel_pt_queue *ptq = data;
struct auxtrace_buffer *buffer = ptq->buffer;
struct auxtrace_buffer *old_buffer = ptq->old_buffer;
struct auxtrace_queue *queue;
int err;
if (ptq->stop) {
b->len = 0;
return 0;
}
queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
buffer = auxtrace_buffer__next(queue, buffer);
if (!buffer) {
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
b->len = 0;
return 0;
}
ptq->buffer = buffer;
err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
if (err)
return err;
if (ptq->step_through_buffers)
ptq->stop = true;
@ -798,6 +886,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
params.get_trace = intel_pt_get_trace;
params.walk_insn = intel_pt_walk_next_insn;
params.lookahead = intel_pt_lookahead;
params.data = ptq;
params.return_compression = intel_pt_return_compression(pt);
params.branch_enable = intel_pt_branch_enable(pt);
@ -921,6 +1010,23 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
ptq->flags |= PERF_IP_FLAG_TRACE_END;
}
static void intel_pt_setup_time_range(struct intel_pt *pt,
struct intel_pt_queue *ptq)
{
if (!pt->range_cnt)
return;
ptq->sel_timestamp = pt->time_ranges[0].start;
ptq->sel_idx = 0;
if (ptq->sel_timestamp) {
ptq->sel_start = true;
} else {
ptq->sel_timestamp = pt->time_ranges[0].end;
ptq->sel_start = false;
}
}
static int intel_pt_setup_queue(struct intel_pt *pt,
struct auxtrace_queue *queue,
unsigned int queue_nr)
@ -945,6 +1051,8 @@ static int intel_pt_setup_queue(struct intel_pt *pt,
ptq->step_through_buffers = true;
ptq->sync_switch = pt->sync_switch;
intel_pt_setup_time_range(pt, ptq);
}
if (!ptq->on_heap &&
@ -959,6 +1067,14 @@ static int intel_pt_setup_queue(struct intel_pt *pt,
intel_pt_log("queue %u getting timestamp\n", queue_nr);
intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
queue_nr, ptq->cpu, ptq->pid, ptq->tid);
if (ptq->sel_start && ptq->sel_timestamp) {
ret = intel_pt_fast_forward(ptq->decoder,
ptq->sel_timestamp);
if (ret)
return ret;
}
while (1) {
state = intel_pt_decode(ptq->decoder);
if (state->err) {
@ -978,6 +1094,9 @@ static int intel_pt_setup_queue(struct intel_pt *pt,
queue_nr, ptq->timestamp);
ptq->state = state;
ptq->have_sample = true;
if (ptq->sel_start && ptq->sel_timestamp &&
ptq->timestamp < ptq->sel_timestamp)
ptq->have_sample = false;
intel_pt_sample_flags(ptq);
ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
if (ret)
@ -1153,6 +1272,13 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
sample.branch_stack = (struct branch_stack *)&dummy_bs;
}
sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
if (sample.cyc_cnt) {
sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
}
return intel_pt_deliver_synth_b_event(pt, event, &sample,
pt->branches_sample_type);
}
@ -1208,6 +1334,13 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
sample.stream_id = ptq->pt->instructions_id;
sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
if (sample.cyc_cnt) {
sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
}
ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
@ -1479,6 +1612,15 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
ptq->have_sample = false;
if (ptq->state->tot_cyc_cnt > ptq->ipc_cyc_cnt) {
/*
* Cycle count and instruction count only go together to create
* a valid IPC ratio when the cycle count changes.
*/
ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
}
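A worked example of the resulting deltas (illustrative numbers): if tot_insn_cnt advances from 1000 to 1180 while tot_cyc_cnt advances from 500 to 560 between two samples, the next synthesized sample carries insn_cnt = 180 and cyc_cnt = 60, from which a consumer can report an average IPC of 180 / 60 = 3.0 for that interval. Because the counts are latched only when the cycle count moves, as the guard above ensures, a sample never pairs fresh instructions with a stale cycle count.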
if (pt->sample_pwr_events && (state->type & INTEL_PT_PWR_EVT)) {
if (state->type & INTEL_PT_CBR_CHG) {
err = intel_pt_synth_cbr_sample(ptq);
@ -1641,10 +1783,83 @@ static void intel_pt_enable_sync_switch(struct intel_pt *pt)
}
}
/*
* To filter against time ranges, it is only necessary to look at the next start
* or end time.
*/
static bool intel_pt_next_time(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
if (ptq->sel_start) {
/* Next time is an end time */
ptq->sel_start = false;
ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
return true;
} else if (ptq->sel_idx + 1 < pt->range_cnt) {
/* Next time is a start time */
ptq->sel_start = true;
ptq->sel_idx += 1;
ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
return true;
}
/* No next time */
return false;
}
static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
{
int err;
while (1) {
if (ptq->sel_start) {
if (ptq->timestamp >= ptq->sel_timestamp) {
/* After start time, so consider next time */
intel_pt_next_time(ptq);
if (!ptq->sel_timestamp) {
/* No end time */
return 0;
}
/* Check against end time */
continue;
}
/* Before start time, so fast forward */
ptq->have_sample = false;
if (ptq->sel_timestamp > *ff_timestamp) {
if (ptq->sync_switch) {
intel_pt_next_tid(ptq->pt, ptq);
ptq->switch_state = INTEL_PT_SS_UNKNOWN;
}
*ff_timestamp = ptq->sel_timestamp;
err = intel_pt_fast_forward(ptq->decoder,
ptq->sel_timestamp);
if (err)
return err;
}
return 0;
} else if (ptq->timestamp > ptq->sel_timestamp) {
/* After end time, so consider next time */
if (!intel_pt_next_time(ptq)) {
/* No next time range, so stop decoding */
ptq->have_sample = false;
ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
return 1;
}
/* Check against next start time */
continue;
} else {
/* Before end time */
return 0;
}
}
}
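A walkthrough of the filter with two ranges, [100, 200] and [300, 0] (no end time), all in TSC units: a queue whose first timestamp is 50 is before the start, so it is fast forwarded towards 100 with have_sample cleared; at timestamp 150 the filter flips sel_timestamp to the end time 200 and lets decoding proceed; at 250 it moves on to the next start time 300 and fast forwards again; once past 300, the next "end" is 0, which the start branch treats as no end time, so decoding continues to the end of the trace.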
static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
const struct intel_pt_state *state = ptq->state;
struct intel_pt *pt = ptq->pt;
u64 ff_timestamp = 0;
int err;
if (!pt->kernel_start) {
@ -1709,6 +1924,12 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
ptq->timestamp = state->timestamp;
}
if (ptq->sel_timestamp) {
err = intel_pt_time_filter(ptq, &ff_timestamp);
if (err)
return err;
}
if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
*timestamp = ptq->timestamp;
return 0;
@ -2114,6 +2335,7 @@ static void intel_pt_free(struct perf_session *session)
thread__put(pt->unknown_thread);
addr_filters__exit(&pt->filts);
zfree(&pt->filter);
zfree(&pt->time_ranges);
free(pt);
}
@ -2411,6 +2633,85 @@ static int intel_pt_perf_config(const char *var, const char *value, void *data)
return 0;
}
/* Find least TSC which converts to ns or later */
static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
{
u64 tsc, tm;
tsc = perf_time_to_tsc(ns, &pt->tc);
while (1) {
tm = tsc_to_perf_time(tsc, &pt->tc);
if (tm < ns)
break;
tsc -= 1;
}
while (tm < ns)
tm = tsc_to_perf_time(++tsc, &pt->tc);
return tsc;
}
/* Find greatest TSC which converts to ns or earlier */
static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
{
u64 tsc, tm;
tsc = perf_time_to_tsc(ns, &pt->tc);
while (1) {
tm = tsc_to_perf_time(tsc, &pt->tc);
if (tm > ns)
break;
tsc += 1;
}
while (tm > ns)
tm = tsc_to_perf_time(--tsc, &pt->tc);
return tsc;
}
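Both helpers exist because perf_time_to_tsc() rounds, so the first conversion can land one tick on the wrong side of ns. For example, if perf_time_to_tsc(1000) yields a TSC that converts back to 1001 ns, intel_pt_tsc_start() walks the TSC down until the round trip falls below 1000 ns, then back up to the least TSC that converts to 1000 ns or later; intel_pt_tsc_end() does the mirror image for the upper bound.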
static int intel_pt_setup_time_ranges(struct intel_pt *pt,
struct itrace_synth_opts *opts)
{
struct perf_time_interval *p = opts->ptime_range;
int n = opts->range_num;
int i;
if (!n || !p || pt->timeless_decoding)
return 0;
pt->time_ranges = calloc(n, sizeof(struct range));
if (!pt->time_ranges)
return -ENOMEM;
pt->range_cnt = n;
intel_pt_log("%s: %u range(s)\n", __func__, n);
for (i = 0; i < n; i++) {
struct range *r = &pt->time_ranges[i];
u64 ts = p[i].start;
u64 te = p[i].end;
/*
* Take care to ensure the TSC range matches the perf-time range
* when converted back to perf-time.
*/
r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
r->end = te ? intel_pt_tsc_end(te, pt) : 0;
intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
i, ts, te);
intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
i, r->start, r->end);
}
return 0;
}
static const char * const intel_pt_info_fmts[] = {
[INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
[INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
@ -2643,6 +2944,12 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
}
if (session->itrace_synth_opts) {
err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
if (err)
goto err_delete_thread;
}
if (pt->synth_opts.calls)
pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_TRACE_END;
@ -2683,6 +2990,7 @@ err_free_queues:
err_free:
addr_filters__exit(&pt->filts);
zfree(&pt->filter);
zfree(&pt->time_ranges);
free(pt);
return err;
}


@ -29,12 +29,16 @@ uint64_t arch__user_reg_mask(void);
#ifdef HAVE_PERF_REGS_SUPPORT
#include <perf_regs.h>
#define DWARF_MINIMAL_REGS ((1ULL << PERF_REG_IP) | (1ULL << PERF_REG_SP))
int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
#else
#define PERF_REGS_MASK 0
#define PERF_REGS_MAX 0
#define DWARF_MINIMAL_REGS PERF_REGS_MASK
static inline const char *perf_reg_name(int id __maybe_unused)
{
return NULL;


@ -17,8 +17,8 @@
* see Documentation/perf.data-file-format.txt.
* PERF_RECORD_AUXTRACE_INFO:
 * Defines a table of contents for PERF_RECORD_AUXTRACE records. This
-* record is generated during 'perf record' command. Each record contains up
-* to 256 entries describing offset and size of the AUXTRACE data in the
+* record is generated during 'perf record' command. Each record contains
+* up to 256 entries describing offset and size of the AUXTRACE data in the
* perf.data file.
* PERF_RECORD_AUXTRACE_ERROR:
* Indicates an error during AUXTRACE collection such as buffer overflow.
@ -237,10 +237,33 @@ static int s390_cpumcf_dumpctr(struct s390_cpumsf *sf,
return rc;
}
-/* Display s390 CPU measurement facility basic-sampling data entry */
+/* Display s390 CPU measurement facility basic-sampling data entry.
+ * Data is written on s390 in big-endian byte order and contains bit
+ * fields across byte boundaries.
+ */
static bool s390_cpumsf_basic_show(const char *color, size_t pos,
-				   struct hws_basic_entry *basic)
+				   struct hws_basic_entry *basicp)
{
struct hws_basic_entry *basic = basicp;
#if __BYTE_ORDER == __LITTLE_ENDIAN
struct hws_basic_entry local;
unsigned long long word = be64toh(*(unsigned long long *)basicp);
memset(&local, 0, sizeof(local));
local.def = be16toh(basicp->def);
local.prim_asn = word & 0xffff;
local.CL = word >> 30 & 0x3;
local.I = word >> 32 & 0x1;
local.AS = word >> 33 & 0x3;
local.P = word >> 35 & 0x1;
local.W = word >> 36 & 0x1;
local.T = word >> 37 & 0x1;
local.U = word >> 40 & 0xf;
local.ia = be64toh(basicp->ia);
local.gpp = be64toh(basicp->gpp);
local.hpp = be64toh(basicp->hpp);
basic = &local;
#endif
if (basic->def != 1) {
pr_err("Invalid AUX trace basic entry [%#08zx]\n", pos);
return false;
@ -258,10 +281,22 @@ static bool s390_cpumsf_basic_show(const char *color, size_t pos,
return true;
}
-/* Display s390 CPU measurement facility diagnostic-sampling data entry */
+/* Display s390 CPU measurement facility diagnostic-sampling data entry.
+ * Data is written on s390 in big-endian byte order and contains bit
+ * fields across byte boundaries.
+ */
static bool s390_cpumsf_diag_show(const char *color, size_t pos,
-				  struct hws_diag_entry *diag)
+				  struct hws_diag_entry *diagp)
{
struct hws_diag_entry *diag = diagp;
#if __BYTE_ORDER == __LITTLE_ENDIAN
struct hws_diag_entry local;
unsigned long long word = be64toh(*(unsigned long long *)diagp);
local.def = be16toh(diagp->def);
local.I = word >> 32 & 0x1;
diag = &local;
#endif
if (diag->def < S390_CPUMSF_DIAG_DEF_FIRST) {
pr_err("Invalid AUX trace diagnostic entry [%#08zx]\n", pos);
return false;
@ -272,35 +307,52 @@ static bool s390_cpumsf_diag_show(const char *color, size_t pos,
}
/* Return TOD timestamp contained in an trailer entry */
-static unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
+static unsigned long long trailer_timestamp(struct hws_trailer_entry *te,
+					    int idx)
{
/* te->t set: TOD in STCKE format, bytes 8-15
 * te->t not set: TOD in STCK format, bytes 0-7
*/
unsigned long long ts;
-	memcpy(&ts, &te->timestamp[te->t], sizeof(ts));
-	return ts;
+	memcpy(&ts, &te->timestamp[idx], sizeof(ts));
+	return be64toh(ts);
}
/* Display s390 CPU measurement facility trailer entry */
static bool s390_cpumsf_trailer_show(const char *color, size_t pos,
struct hws_trailer_entry *te)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
struct hws_trailer_entry local;
const unsigned long long flags = be64toh(te->flags);
memset(&local, 0, sizeof(local));
local.f = flags >> 63 & 0x1;
local.a = flags >> 62 & 0x1;
local.t = flags >> 61 & 0x1;
local.bsdes = be16toh((flags >> 16 & 0xffff));
local.dsdes = be16toh((flags & 0xffff));
memcpy(&local.timestamp, te->timestamp, sizeof(te->timestamp));
local.overflow = be64toh(te->overflow);
local.clock_base = be64toh(te->progusage[0]) >> 63 & 1;
local.progusage2 = be64toh(te->progusage2);
te = &local;
#endif
if (te->bsdes != sizeof(struct hws_basic_entry)) {
pr_err("Invalid AUX trace trailer entry [%#08zx]\n", pos);
return false;
}
color_fprintf(stdout, color, " [%#08zx] Trailer %c%c%c bsdes:%d"
" dsdes:%d Overflow:%lld Time:%#llx\n"
"\t\tC:%d TOD:%#lx 1:%#llx 2:%#llx\n",
"\t\tC:%d TOD:%#lx\n",
pos,
te->f ? 'F' : ' ',
te->a ? 'A' : ' ',
te->t ? 'T' : ' ',
te->bsdes, te->dsdes, te->overflow,
-		      trailer_timestamp(te), te->clock_base, te->progusage2,
-		      te->progusage[0], te->progusage[1]);
+		      trailer_timestamp(te, te->clock_base),
+		      te->clock_base, te->progusage2);
return true;
}
@ -327,13 +379,13 @@ static bool s390_cpumsf_validate(int machine_type,
*dsdes = *bsdes = 0;
if (len & (S390_CPUMSF_PAGESZ - 1)) /* Illegal size */
return false;
-	if (basic->def != 1)	/* No basic set entry, must be first */
+	if (be16toh(basic->def) != 1) /* No basic set entry, must be first */
return false;
/* Check for trailer entry at end of SDB */
te = (struct hws_trailer_entry *)(buf + S390_CPUMSF_PAGESZ
- sizeof(*te));
-	*bsdes = te->bsdes;
-	*dsdes = te->dsdes;
+	*bsdes = be16toh(te->bsdes);
+	*dsdes = be16toh(te->dsdes);
if (!te->bsdes && !te->dsdes) {
/* Very old hardware, use CPUID */
switch (machine_type) {
@ -495,19 +547,27 @@ static bool s390_cpumsf_make_event(size_t pos,
static unsigned long long get_trailer_time(const unsigned char *buf)
{
struct hws_trailer_entry *te;
-	unsigned long long aux_time;
+	unsigned long long aux_time, progusage2;
+	bool clock_base;
te = (struct hws_trailer_entry *)(buf + S390_CPUMSF_PAGESZ
- sizeof(*te));
-	if (!te->clock_base)	/* TOD_CLOCK_BASE value missing */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+	clock_base = be64toh(te->progusage[0]) >> 63 & 0x1;
+	progusage2 = be64toh(te->progusage[1]);
+#else
+	clock_base = te->clock_base;
+	progusage2 = te->progusage2;
+#endif
+	if (!clock_base)	/* TOD_CLOCK_BASE value missing */
return 0;
	/* Correct calculation to convert timestamp in trailer entry to
	 * nanoseconds (taken from arch/s390 function tod_to_ns()).
* TOD_CLOCK_BASE is stored in trailer entry member progusage2.
*/
-	aux_time = trailer_timestamp(te) - te->progusage2;
+	aux_time = trailer_timestamp(te, clock_base) - progusage2;
aux_time = (aux_time >> 9) * 125 + (((aux_time & 0x1ff) * 125) >> 9);
return aux_time;
}
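The arithmetic works because one TOD clock unit is 2^-12 microseconds (bit 51 of the TOD clock ticks once per microsecond), so ns = tod * 1000 / 4096 = tod * 125 / 512; splitting it as (tod >> 9) * 125 + ((tod & 0x1ff) * 125 >> 9) computes the same value while keeping the intermediate product from overflowing 64 bits. As a quick check, tod = 4096 gives 8 * 125 + 0 = 1000 ns, i.e. exactly one microsecond.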


@ -1111,7 +1111,7 @@ static int python_export_sample(struct db_export *dbe,
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
-	t = tuple_new(22);
+	t = tuple_new(24);
tuple_set_u64(t, 0, es->db_id);
tuple_set_u64(t, 1, es->evsel->db_id);
@ -1135,6 +1135,8 @@ static int python_export_sample(struct db_export *dbe,
tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK);
tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX));
tuple_set_u64(t, 21, es->call_path_id);
tuple_set_u64(t, 22, es->sample->insn_cnt);
tuple_set_u64(t, 23, es->sample->cyc_cnt);
call_object(tables->sample_handler, t, "sample_table");
@ -1173,7 +1175,7 @@ static int python_export_call_return(struct db_export *dbe,
u64 comm_db_id = cr->comm ? cr->comm->db_id : 0;
PyObject *t;
-	t = tuple_new(12);
+	t = tuple_new(14);
tuple_set_u64(t, 0, cr->db_id);
tuple_set_u64(t, 1, cr->thread->db_id);
@ -1187,6 +1189,8 @@ static int python_export_call_return(struct db_export *dbe,
tuple_set_u64(t, 9, cr->cp->parent->db_id);
tuple_set_s32(t, 10, cr->flags);
tuple_set_u64(t, 11, cr->parent_db_id);
tuple_set_u64(t, 12, cr->insn_count);
tuple_set_u64(t, 13, cr->cyc_count);
call_object(tables->call_return_handler, t, "call_return_table");


@ -23,8 +23,12 @@ int smt_on(void)
char fn[256];
snprintf(fn, sizeof fn,
"devices/system/cpu/cpu%d/topology/thread_siblings",
cpu);
"devices/system/cpu/cpu%d/topology/core_cpus", cpu);
if (access(fn, F_OK) == -1) {
snprintf(fn, sizeof fn,
"devices/system/cpu/cpu%d/topology/thread_siblings",
cpu);
}
if (sysfs__read_str(fn, &str, &strlen) < 0)
continue;
/* Entry is hex, but does not have 0x, so need custom parser */
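
The fallback above exists because newer kernels expose the sibling mask
under a new sysfs name, topology/core_cpus, with thread_siblings kept as
the legacy alias. Illustrative check on a 2-way SMT system (the mask value
will vary by machine):

    # cat /sys/devices/system/cpu/cpu0/topology/core_cpus
    00000003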

@@ -69,8 +69,9 @@ static void aggr_printout(struct perf_stat_config *config,
{
switch (config->aggr_mode) {
case AGGR_CORE:
fprintf(config->output, "S%d-C%*d%s%*d%s",
fprintf(config->output, "S%d-D%d-C%*d%s%*d%s",
cpu_map__id_to_socket(id),
cpu_map__id_to_die(id),
config->csv_output ? 0 : -8,
cpu_map__id_to_cpu(id),
config->csv_sep,
@@ -78,6 +79,16 @@ static void aggr_printout(struct perf_stat_config *config,
nr,
config->csv_sep);
break;
+case AGGR_DIE:
+fprintf(config->output, "S%d-D%*d%s%*d%s",
+cpu_map__id_to_socket(id << 16),
+config->csv_output ? 0 : -8,
+cpu_map__id_to_die(id << 16),
+config->csv_sep,
+config->csv_output ? 0 : 4,
+nr,
+config->csv_sep);
+break;
case AGGR_SOCKET:
fprintf(config->output, "S%*d%s%*d%s",
config->csv_output ? 0 : -5,
@@ -89,8 +100,9 @@ static void aggr_printout(struct perf_stat_config *config,
break;
case AGGR_NONE:
if (evsel->percore) {
fprintf(config->output, "S%d-C%*d%s",
fprintf(config->output, "S%d-D%d-C%*d%s",
cpu_map__id_to_socket(id),
cpu_map__id_to_die(id),
config->csv_output ? 0 : -5,
cpu_map__id_to_cpu(id), config->csv_sep);
} else {
@@ -407,6 +419,7 @@ static void printout(struct perf_stat_config *config, int id, int nr,
[AGGR_THREAD] = 1,
[AGGR_NONE] = 1,
[AGGR_SOCKET] = 2,
+[AGGR_DIE] = 2,
[AGGR_CORE] = 2,
};
@@ -879,7 +892,8 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
}
static int aggr_header_lens[] = {
-[AGGR_CORE] = 18,
+[AGGR_CORE] = 24,
+[AGGR_DIE] = 18,
[AGGR_SOCKET] = 12,
[AGGR_NONE] = 6,
[AGGR_THREAD] = 24,
@@ -888,6 +902,7 @@ static int aggr_header_lens[] = {
static const char *aggr_header_csv[] = {
[AGGR_CORE] = "core,cpus,",
[AGGR_DIE] = "die,cpus",
[AGGR_SOCKET] = "socket,cpus",
[AGGR_NONE] = "cpu,",
[AGGR_THREAD] = "comm-pid,",
@@ -954,8 +969,13 @@ static void print_interval(struct perf_stat_config *config,
if (!metric_only)
fprintf(output, " counts %*s events\n", unit_width, "unit");
break;
+case AGGR_DIE:
+fprintf(output, "# time die cpus");
+if (!metric_only)
+fprintf(output, " counts %*s events\n", unit_width, "unit");
+break;
case AGGR_CORE:
fprintf(output, "# time core cpus");
fprintf(output, "# time core cpus");
if (!metric_only)
fprintf(output, " counts %*s events\n", unit_width, "unit");
break;
@@ -1165,6 +1185,7 @@ perf_evlist__print_counters(struct perf_evlist *evlist,
switch (config->aggr_mode) {
case AGGR_CORE:
+case AGGR_DIE:
case AGGR_SOCKET:
print_aggr(config, evlist, prefix);
break;
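
For AGGR_DIE the 'id << 16' above lets the existing per-core helpers be
reused. A sketch of the id layout this implies (inferred, not quoted from
cpumap.c): a per-core aggregation id packs socket/die/core, while a
per-die id keeps only the upper two fields:

    #define CORE_AGGR_ID(socket, die, core) \
            (((socket) << 24) | ((die) << 16) | (core))
    #define DIE_AGGR_ID(socket, die) \
            (((socket) << 8) | (die))

    /*
     * DIE_AGGR_ID(s, d) << 16 == CORE_AGGR_ID(s, d, 0), so
     * cpu_map__id_to_socket(id << 16) yields s and
     * cpu_map__id_to_die(id << 16) yields d.
     */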

@@ -12,6 +12,7 @@
/*
* AGGR_GLOBAL: Use CPU 0
* AGGR_SOCKET: Use first CPU of socket
+* AGGR_DIE: Use first CPU of die
* AGGR_CORE: Use first CPU of core
* AGGR_NONE: Use matching CPU
* AGGR_THREAD: Not supported?

@@ -272,6 +272,7 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
switch (config->aggr_mode) {
case AGGR_THREAD:
case AGGR_CORE:
+case AGGR_DIE:
case AGGR_SOCKET:
case AGGR_NONE:
if (!evsel->snapshot)

@@ -44,6 +44,7 @@ enum aggr_mode {
AGGR_NONE,
AGGR_GLOBAL,
AGGR_SOCKET,
+AGGR_DIE,
AGGR_CORE,
AGGR_THREAD,
AGGR_UNSET,

@@ -699,7 +699,6 @@ bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
enum dso_binary_type type)
{
-int err = -1;
GElf_Ehdr ehdr;
Elf *elf;
int fd;
@@ -793,7 +792,7 @@ out_elf_end:
elf_end(elf);
out_close:
close(fd);
-return err;
+return -1;
}
/**

@@ -40,6 +40,8 @@ enum retpoline_state_t {
* @timestamp: timestamp (if known)
* @ref: external reference (e.g. db_id of sample)
* @branch_count: the branch count when the entry was created
+* @insn_count: the instruction count when the entry was created
+* @cyc_count: the cycle count when the entry was created
* @db_id: id used for db-export
* @cp: call path
* @no_call: a 'call' was not seen
@@ -51,6 +53,8 @@ struct thread_stack_entry {
u64 timestamp;
u64 ref;
u64 branch_count;
+u64 insn_count;
+u64 cyc_count;
u64 db_id;
struct call_path *cp;
bool no_call;
@@ -66,6 +70,8 @@ struct thread_stack_entry {
* @sz: current maximum stack size
* @trace_nr: current trace number
* @branch_count: running branch count
+* @insn_count: running instruction count
+* @cyc_count: running cycle count
* @kernel_start: kernel start address
* @last_time: last timestamp
* @crp: call/return processor
@@ -79,6 +85,8 @@ struct thread_stack {
size_t sz;
u64 trace_nr;
u64 branch_count;
+u64 insn_count;
+u64 cyc_count;
u64 kernel_start;
u64 last_time;
struct call_return_processor *crp;
@@ -280,6 +288,8 @@ static int thread_stack__call_return(struct thread *thread,
cr.call_time = tse->timestamp;
cr.return_time = timestamp;
cr.branch_count = ts->branch_count - tse->branch_count;
+cr.insn_count = ts->insn_count - tse->insn_count;
+cr.cyc_count = ts->cyc_count - tse->cyc_count;
cr.db_id = tse->db_id;
cr.call_ref = tse->ref;
cr.return_ref = ref;
@@ -535,6 +545,8 @@ static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
tse->timestamp = timestamp;
tse->ref = ref;
tse->branch_count = ts->branch_count;
+tse->insn_count = ts->insn_count;
+tse->cyc_count = ts->cyc_count;
tse->cp = cp;
tse->no_call = no_call;
tse->trace_end = trace_end;
@@ -865,6 +877,8 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
}
ts->branch_count += 1;
+ts->insn_count += sample->insn_cnt;
+ts->cyc_count += sample->cyc_cnt;
ts->last_time = sample->time;
if (sample->flags & PERF_IP_FLAG_CALL) {
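
With the running counters snapshotted at call time and subtracted at
return time, a consumer of struct call_return (defined in the header
change below) can approximate per-function IPC. A hypothetical helper,
not part of the patch; the counts come from periodic Intel PT updates,
so they are averages, and cyc_count can be zero:

    static double call_return_ipc(const struct call_return *cr)
    {
            if (!cr->cyc_count)
                    return 0.0; /* no cycle information in this interval */
            return (double)cr->insn_count / (double)cr->cyc_count;
    }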

@@ -43,6 +43,8 @@ enum {
* @call_time: timestamp of call (if known)
* @return_time: timestamp of return (if known)
* @branch_count: number of branches seen between call and return
+* @insn_count: approx. number of instructions between call and return
+* @cyc_count: approx. number of cycles between call and return
* @call_ref: external reference to 'call' sample (e.g. db_id)
* @return_ref: external reference to 'return' sample (e.g. db_id)
* @db_id: id used for db-export
@@ -56,6 +58,8 @@ struct call_return {
u64 call_time;
u64 return_time;
u64 branch_count;
+u64 insn_count;
+u64 cyc_count;
u64 call_ref;
u64 return_ref;
u64 db_id;

@@ -7,6 +7,7 @@
#include <errno.h>
#include <inttypes.h>
#include <math.h>
+#include <ctype.h>
#include "perf.h"
#include "debug.h"
@@ -116,6 +117,69 @@ int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
return rc;
}
+static int perf_time__parse_strs(struct perf_time_interval *ptime,
+const char *ostr, int size)
+{
+const char *cp;
+char *str, *arg, *p;
+int i, num = 0, rc = 0;
+/* Count the commas */
+for (cp = ostr; *cp; cp++)
+num += !!(*cp == ',');
+if (!num)
+return -EINVAL;
+BUG_ON(num > size);
+str = strdup(ostr);
+if (!str)
+return -ENOMEM;
+/* Split the string and parse each piece, except the last */
+for (i = 0, p = str; i < num - 1; i++) {
+arg = p;
+/* Find next comma, there must be one */
+p = strchr(p, ',') + 1;
+/* Skip white space */
+while (isspace(*p))
+p++;
+/* Skip the value, must not contain space or comma */
+while (*p && !isspace(*p)) {
+if (*p++ == ',') {
+rc = -EINVAL;
+goto out;
+}
+}
+/* Split and parse */
+if (*p)
+*p++ = 0;
+rc = perf_time__parse_str(ptime + i, arg);
+if (rc < 0)
+goto out;
+}
+/* Parse the last piece */
+rc = perf_time__parse_str(ptime + i, p);
+if (rc < 0)
+goto out;
+/* Check there is no overlap */
+for (i = 0; i < num - 1; i++) {
+if (ptime[i].end >= ptime[i + 1].start) {
+rc = -EINVAL;
+goto out;
+}
+}
+rc = num;
+out:
+free(str);
+return rc;
+}
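
From the parsing above, the accepted form appears to be a whitespace
separated list of start,stop pairs, each pair containing exactly one
comma; on success the function returns the number of ranges. An
illustrative invocation (times are seconds.nanoseconds):

    perf script --time "1234.567,1234.789 1235.000,1235.200"
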
static int parse_percent(double *pcnt, char *str)
{
char *c, *endptr;
@@ -135,12 +199,30 @@ static int parse_percent(double *pcnt, char *str)
return 0;
}
+static int set_percent_time(struct perf_time_interval *ptime, double start_pcnt,
+double end_pcnt, u64 start, u64 end)
+{
+u64 total = end - start;
+if (start_pcnt < 0.0 || start_pcnt > 1.0 ||
+end_pcnt < 0.0 || end_pcnt > 1.0) {
+return -1;
+}
+ptime->start = start + round(start_pcnt * total);
+ptime->end = start + round(end_pcnt * total);
+if (ptime->end > ptime->start && ptime->end != end)
+ptime->end -= 1;
+return 0;
+}
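
The 'ptime->end -= 1' keeps consecutive percentage slices disjoint now
that range ends are treated as inclusive. Worked example (hypothetical
sample times): with first_sample_time = 1000 and last_sample_time = 2000,
the slice "10%/2" gives start_pcnt = 0.1 and end_pcnt = 0.2, so
ptime->start = 1100 and ptime->end = 1200 - 1 = 1199, while the next
slice "10%/3" starts at 1200; only a slice ending exactly at 2000 keeps
its endpoint.
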
static int percent_slash_split(char *str, struct perf_time_interval *ptime,
u64 start, u64 end)
{
char *p, *end_str;
double pcnt, start_pcnt, end_pcnt;
-u64 total = end - start;
int i;
/*
@@ -168,15 +250,7 @@ static int percent_slash_split(char *str, struct perf_time_interval *ptime,
start_pcnt = pcnt * (i - 1);
end_pcnt = pcnt * i;
-if (start_pcnt < 0.0 || start_pcnt > 1.0 ||
-end_pcnt < 0.0 || end_pcnt > 1.0) {
-return -1;
-}
-ptime->start = start + round(start_pcnt * total);
-ptime->end = start + round(end_pcnt * total);
-return 0;
+return set_percent_time(ptime, start_pcnt, end_pcnt, start, end);
}
static int percent_dash_split(char *str, struct perf_time_interval *ptime,
@@ -184,7 +258,6 @@ static int percent_dash_split(char *str, struct perf_time_interval *ptime,
{
char *start_str = NULL, *end_str;
double start_pcnt, end_pcnt;
-u64 total = end - start;
int ret;
/*
@@ -203,16 +276,7 @@ static int percent_dash_split(char *str, struct perf_time_interval *ptime,
free(start_str);
-if (start_pcnt < 0.0 || start_pcnt > 1.0 ||
-end_pcnt < 0.0 || end_pcnt > 1.0 ||
-start_pcnt > end_pcnt) {
-return -1;
-}
-ptime->start = start + round(start_pcnt * total);
-ptime->end = start + round(end_pcnt * total);
-return 0;
+return set_percent_time(ptime, start_pcnt, end_pcnt, start, end);
}
typedef int (*time_pecent_split)(char *, struct perf_time_interval *,
@@ -389,13 +453,12 @@ bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
ptime = &ptime_buf[i];
if (timestamp >= ptime->start &&
-((timestamp < ptime->end && i < num - 1) ||
-(timestamp <= ptime->end && i == num - 1))) {
-break;
+(timestamp <= ptime->end || !ptime->end)) {
+return false;
}
}
-return (i == num) ? true : false;
+return true;
}
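
Semantics after the change, illustrated with hypothetical values: given
ptime_buf = { {1100, 1199}, {1200, 1299} } and num = 2, a sample at 1250
returns false (kept) and one at 1350 returns true (skipped); a range
whose .end is 0 is open-ended, so every timestamp >= .start is kept. All
range ends are now inclusive, which is why set_percent_time() trims
interior slice boundaries by one.
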
int perf_time__parse_for_ranges(const char *time_str,
@@ -403,20 +466,20 @@ int perf_time__parse_for_ranges(const char *time_str,
struct perf_time_interval **ranges,
int *range_size, int *range_num)
{
+bool has_percent = strchr(time_str, '%');
struct perf_time_interval *ptime_range;
-int size, num, ret;
+int size, num, ret = -EINVAL;
ptime_range = perf_time__range_alloc(time_str, &size);
if (!ptime_range)
return -ENOMEM;
-if (perf_time__parse_str(ptime_range, time_str) != 0) {
+if (has_percent) {
if (session->evlist->first_sample_time == 0 &&
session->evlist->last_sample_time == 0) {
pr_err("HINT: no first/last sample time found in perf data.\n"
"Please use latest perf binary to execute 'perf record'\n"
"(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
-ret = -EINVAL;
goto error;
}
@@ -425,21 +488,20 @@ int perf_time__parse_for_ranges(const char *time_str,
time_str,
session->evlist->first_sample_time,
session->evlist->last_sample_time);
-if (num < 0) {
-pr_err("Invalid time string\n");
-ret = -EINVAL;
-goto error;
-}
} else {
-num = 1;
+num = perf_time__parse_strs(ptime_range, time_str, size);
}
+if (num < 0)
+goto error_invalid;
*range_size = size;
*range_num = num;
*ranges = ptime_range;
return 0;
+error_invalid:
+pr_err("Invalid time string\n");
error:
free(ptime_range);
return ret;