Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

. Make some POWER7 events available in sysfs, equivalent to
  what was done on x86, from Sukadev Bhattiprolu.

. Add event group view, from Namhyung Kim:

  To use it, 'perf record' needs to group the events when recording.
  'perf report' then parses the saved group relation from the file header
  and prints the events together when the --group option is given.  The
  'perf evlist' command can be used to see the event group information:

    $ perf record -e '{ref-cycles,cycles}' noploop 1
    [ perf record: Woken up 2 times to write data ]
    [ perf record: Captured and wrote 0.385 MB perf.data (~16807 samples) ]

    $ perf evlist --group
    {ref-cycles,cycles}

  With this example, the default 'perf report' output shows each event
  separately:

    $ perf report
    ...
    # group: {ref-cycles,cycles}
    # ========
    # Samples: 3K of event 'ref-cycles'
    # Event count (approx.): 3153797218
    #
    # Overhead  Command      Shared Object                      Symbol
    # ........  .......  .................  ..........................
        99.84%  noploop  noploop            [.] main
         0.07%  noploop  ld-2.15.so         [.] strcmp
         0.03%  noploop  [kernel.kallsyms]  [k] timerqueue_del
         0.03%  noploop  [kernel.kallsyms]  [k] sched_clock_cpu
         0.02%  noploop  [kernel.kallsyms]  [k] account_user_time
         0.01%  noploop  [kernel.kallsyms]  [k] __alloc_pages_nodemask
         0.00%  noploop  [kernel.kallsyms]  [k] native_write_msr_safe

    # Samples: 3K of event 'cycles'
    # Event count (approx.): 3722310525
    #
    # Overhead  Command      Shared Object                     Symbol
    # ........  .......  .................  .........................
        99.76%  noploop  noploop            [.] main
         0.11%  noploop  [kernel.kallsyms]  [k] _raw_spin_lock
         0.06%  noploop  [kernel.kallsyms]  [k] find_get_page
         0.03%  noploop  [kernel.kallsyms]  [k] sched_clock_cpu
         0.02%  noploop  [kernel.kallsyms]  [k] rcu_check_callbacks
         0.02%  noploop  [kernel.kallsyms]  [k] __current_kernel_time
         0.00%  noploop  [kernel.kallsyms]  [k] native_write_msr_safe

  In this case the event group information is shown at the end of the
  header area.  Use the --group option to enable the event group view:

    $ perf report --group
    ...
    # group: {ref-cycles,cycles}
    # ========
    # Samples: 7K of event 'anon group { ref-cycles, cycles }'
    # Event count (approx.): 6876107743
    #
    #         Overhead  Command      Shared Object                      Symbol
    # ................  .......  .................  ..........................
        99.84%  99.76%  noploop  noploop            [.] main
         0.07%   0.00%  noploop  ld-2.15.so         [.] strcmp
         0.03%   0.00%  noploop  [kernel.kallsyms]  [k] timerqueue_del
         0.03%   0.03%  noploop  [kernel.kallsyms]  [k] sched_clock_cpu
         0.02%   0.00%  noploop  [kernel.kallsyms]  [k] account_user_time
         0.01%   0.00%  noploop  [kernel.kallsyms]  [k] __alloc_pages_nodemask
         0.00%   0.00%  noploop  [kernel.kallsyms]  [k] native_write_msr_safe
         0.00%   0.11%  noploop  [kernel.kallsyms]  [k] _raw_spin_lock
         0.00%   0.06%  noploop  [kernel.kallsyms]  [k] find_get_page
         0.00%   0.02%  noploop  [kernel.kallsyms]  [k] rcu_check_callbacks
         0.00%   0.02%  noploop  [kernel.kallsyms]  [k] __current_kernel_time

  As you can see, the Overhead column now contains both ref-cycles and
  cycles, and the header line also shows the group information - 'anon
  group { ref-cycles, cycles }'.  The output is sorted by the period of
  the group leader first.

  If the perf.data file doesn't contain group information, the --group
  option does nothing.  To enable the event group view by default, set it
  in the ~/.perfconfig file:

    $ cat ~/.perfconfig
    [report]
    group = true

  It can be overridden on the command line if needed:

    $ perf report --no-group

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2013-02-01 11:17:09 +01:00
commit 9c4c5fd9e6
28 changed files with 1060 additions and 268 deletions


@ -0,0 +1,62 @@
What: /sys/devices/cpu/events/
/sys/devices/cpu/events/branch-misses
/sys/devices/cpu/events/cache-references
/sys/devices/cpu/events/cache-misses
/sys/devices/cpu/events/stalled-cycles-frontend
/sys/devices/cpu/events/branch-instructions
/sys/devices/cpu/events/stalled-cycles-backend
/sys/devices/cpu/events/instructions
/sys/devices/cpu/events/cpu-cycles
Date: 2013/01/08
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Generic performance monitoring events
A collection of performance monitoring events that may be
supported by many/most CPUs. These events can be monitored
using the 'perf(1)' tool.
The contents of each file would look like:
event=0xNNNN
where 'N' is a hex digit and the number '0xNNNN' shows the
"raw code" for the perf event identified by the file's
"basename".
What: /sys/devices/cpu/events/PM_LD_MISS_L1
/sys/devices/cpu/events/PM_LD_REF_L1
/sys/devices/cpu/events/PM_CYC
/sys/devices/cpu/events/PM_BRU_FIN
/sys/devices/cpu/events/PM_GCT_NOSLOT_CYC
/sys/devices/cpu/events/PM_BRU_MPRED
/sys/devices/cpu/events/PM_INST_CMPL
/sys/devices/cpu/events/PM_CMPLU_STALL
Date: 2013/01/08
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Linux Powerpc mailing list <linuxppc-dev@ozlabs.org>
Description: POWER-systems specific performance monitoring events
A collection of performance monitoring events that may be
supported by the POWER CPU. These events can be monitored
using the 'perf(1)' tool.
These events may not be supported by other CPUs.
The contents of each file would look like:
event=0xNNNN
where 'N' is a hex digit and the number '0xNNNN' shows the
"raw code" for the perf event identified by the file's
"basename".
Further, multiple terms like 'event=0xNNNN' can be specified
and separated with comma. All available terms are defined in
the /sys/bus/event_source/devices/<dev>/format file.
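
For illustration, the raw code read from one of these files can be handed
back to perf through the PMU's 'event' format term.  A minimal sketch, not
part of this commit; shown for an Intel x86 box where the generic cpu-cycles
event is typically encoded as 0x3c - the value is CPU specific:

  $ cat /sys/devices/cpu/events/cpu-cycles
  event=0x3c
  $ perf stat -e cpu/event=0x3c/ -- sleep 1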


@ -11,6 +11,7 @@
#include <linux/types.h>
#include <asm/hw_irq.h>
#include <linux/device.h>
#define MAX_HWEVENTS 8
#define MAX_EVENT_ALTERNATIVES 8
@ -35,6 +36,7 @@ struct power_pmu {
void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
int (*limited_pmc_event)(u64 event_id);
u32 flags;
const struct attribute_group **attr_groups;
int n_generic;
int *generic_events;
int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
@ -109,3 +111,27 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
* If an event_id is not subject to the constraint expressed by a particular
* field, then it will have 0 in both the mask and value for that field.
*/
extern ssize_t power_events_sysfs_show(struct device *dev,
struct device_attribute *attr, char *page);
/*
* EVENT_VAR() is same as PMU_EVENT_VAR with a suffix.
*
* Having a suffix allows us to have aliases in sysfs - eg: the generic
* event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and
* 'PM_CYC' where the latter is the name by which the event is known in
* POWER CPU specification.
*/
#define EVENT_VAR(_id, _suffix) event_attr_##_id##_suffix
#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix)
#define EVENT_ATTR(_name, _id, _suffix) \
PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \
power_events_sysfs_show)
#define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g)
#define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g)
#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(PM_##_name, _id, _p)
#define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p)
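
As the comment above notes, the _g/_p suffixes give one raw event code two
sysfs entries - the generic name and the POWER name.  A sketch of the result
on a POWER7 box (the 0x1e code is PME_PM_CYC from the power7-pmu.c hunk
below; real listings may differ):

  $ cd /sys/bus/event_source/devices/cpu/events
  $ cat cpu-cycles PM_CYC
  event=0x1e
  event=0x1e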


@ -1305,6 +1305,16 @@ static int power_pmu_event_idx(struct perf_event *event)
return event->hw.idx;
}
ssize_t power_events_sysfs_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}
struct pmu power_pmu = {
.pmu_enable = power_pmu_enable,
.pmu_disable = power_pmu_disable,
@ -1537,6 +1547,8 @@ int __cpuinit register_power_pmu(struct power_pmu *pmu)
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
power_pmu.attr_groups = ppmu->attr_groups;
#ifdef MSR_HV
/*
* Use FCHV to ignore kernel events if MSR.HV is set.


@ -50,6 +50,18 @@
#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
#define MMCR1_PMCSEL_MSK 0xff
/*
* Power7 event codes.
*/
#define PME_PM_CYC 0x1e
#define PME_PM_GCT_NOSLOT_CYC 0x100f8
#define PME_PM_CMPLU_STALL 0x4000a
#define PME_PM_INST_CMPL 0x2
#define PME_PM_LD_REF_L1 0xc880
#define PME_PM_LD_MISS_L1 0x400f0
#define PME_PM_BRU_FIN 0x10068
#define PME_PM_BRU_MPRED 0x400f6
/*
* Layout of constraint bits:
* 6666555555555544444444443333333333222222222211111111110000000000
@ -307,14 +319,14 @@ static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
}
static int power7_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = 0x1e,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x100f8, /* GCT_NOSLOT_CYC */
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x4000a, /* CMPLU_STALL */
[PERF_COUNT_HW_INSTRUCTIONS] = 2,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU*/
[PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */
[PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */
[PERF_COUNT_HW_CPU_CYCLES] = PME_PM_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PME_PM_GCT_NOSLOT_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PME_PM_CMPLU_STALL,
[PERF_COUNT_HW_INSTRUCTIONS] = PME_PM_INST_CMPL,
[PERF_COUNT_HW_CACHE_REFERENCES] = PME_PM_LD_REF_L1,
[PERF_COUNT_HW_CACHE_MISSES] = PME_PM_LD_MISS_L1,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PME_PM_BRU_FIN,
[PERF_COUNT_HW_BRANCH_MISSES] = PME_PM_BRU_MPRED,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
@ -362,6 +374,57 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
GENERIC_EVENT_ATTR(cpu-cycles, CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions, INST_CMPL);
GENERIC_EVENT_ATTR(cache-references, LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1);
GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses, BRU_MPRED);
POWER_EVENT_ATTR(CYC, CYC);
POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC);
POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL);
POWER_EVENT_ATTR(INST_CMPL, INST_CMPL);
POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1);
POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1);
POWER_EVENT_ATTR(BRU_FIN, BRU_FIN)
POWER_EVENT_ATTR(BRU_MPRED, BRU_MPRED);
static struct attribute *power7_events_attr[] = {
GENERIC_EVENT_PTR(CYC),
GENERIC_EVENT_PTR(GCT_NOSLOT_CYC),
GENERIC_EVENT_PTR(CMPLU_STALL),
GENERIC_EVENT_PTR(INST_CMPL),
GENERIC_EVENT_PTR(LD_REF_L1),
GENERIC_EVENT_PTR(LD_MISS_L1),
GENERIC_EVENT_PTR(BRU_FIN),
GENERIC_EVENT_PTR(BRU_MPRED),
POWER_EVENT_PTR(CYC),
POWER_EVENT_PTR(GCT_NOSLOT_CYC),
POWER_EVENT_PTR(CMPLU_STALL),
POWER_EVENT_PTR(INST_CMPL),
POWER_EVENT_PTR(LD_REF_L1),
POWER_EVENT_PTR(LD_MISS_L1),
POWER_EVENT_PTR(BRU_FIN),
POWER_EVENT_PTR(BRU_MPRED),
NULL
};
static struct attribute_group power7_pmu_events_group = {
.name = "events",
.attrs = power7_events_attr,
};
static const struct attribute_group *power7_pmu_attr_groups[] = {
&power7_pmu_events_group,
NULL,
};
static struct power_pmu power7_pmu = {
.name = "POWER7",
.n_counter = 6,
@ -373,6 +436,7 @@ static struct power_pmu power7_pmu = {
.get_alternatives = power7_get_alternatives,
.disable_pmc = power7_disable_pmc,
.flags = PPMU_ALT_SIPR,
.attr_groups = power7_pmu_attr_groups,
.n_generic = ARRAY_SIZE(power7_generic_events),
.generic_events = power7_generic_events,
.cache_events = &power7_cache_events,


@ -1310,11 +1310,6 @@ static struct attribute_group x86_pmu_format_group = {
.attrs = NULL,
};
struct perf_pmu_events_attr {
struct device_attribute attr;
u64 id;
};
/*
* Remove all undefined events (x86_pmu.event_map(id) == 0)
* out of events_attr attributes.
@ -1348,11 +1343,9 @@ static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *at
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
#define EVENT_ATTR(_name, _id) \
static struct perf_pmu_events_attr EVENT_VAR(_id) = { \
.attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \
.id = PERF_COUNT_HW_##_id, \
};
#define EVENT_ATTR(_name, _id) \
PMU_EVENT_ATTR(_name, EVENT_VAR(_id), PERF_COUNT_HW_##_id, \
events_sysfs_show)
EVENT_ATTR(cpu-cycles, CPU_CYCLES );
EVENT_ATTR(instructions, INSTRUCTIONS );


@ -817,6 +817,17 @@ do { \
} while (0)
struct perf_pmu_events_attr {
struct device_attribute attr;
u64 id;
};
#define PMU_EVENT_ATTR(_name, _var, _id, _show) \
static struct perf_pmu_events_attr _var = { \
.attr = __ATTR(_name, 0444, _show, NULL), \
.id = _id, \
};
#define PMU_FORMAT_ATTR(_name, _format) \
static ssize_t \
_name##_show(struct device *dev, \


@ -28,6 +28,10 @@ OPTIONS
--verbose=::
Show all fields.
-g::
--group::
Show event group information.
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-list[1],


@ -203,6 +203,9 @@ OPTIONS
--objdump=<path>::
Path to objdump binary.
--group::
Show event group information together.
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-annotate[1]


@ -39,6 +39,8 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_BOOLEAN('F', "freq", &details.freq, "Show the sample frequency"),
OPT_BOOLEAN('v', "verbose", &details.verbose,
"Show all event attr details"),
OPT_BOOLEAN('g', "group", &symbol_conf.event_group,
"Show event group information"),
OPT_END()
};
const char * const evlist_usage[] = {
@ -50,5 +52,10 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
if (argc)
usage_with_options(evlist_usage, options);
if (symbol_conf.event_group && (details.verbose || details.freq)) {
pr_err("--group option is not compatible with other options\n");
usage_with_options(evlist_usage, options);
}
return __cmd_evlist(input_name, &details);
}
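
A quick sketch of the new option check in action (hypothetical session; the
usage text printed after the error is trimmed):

  $ perf evlist --group --verbose
  --group option is not compatible with other options
  ...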


@ -486,6 +486,9 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
goto out_delete_session;
}
if (!evsel_list->nr_groups)
perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
/*
* perf_session__delete(session) will be called at perf_record__exit()
*/


@ -8,6 +8,7 @@
#include "builtin.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/annotate.h"
#include "util/color.h"
@ -54,6 +55,16 @@ struct perf_report {
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};
static int perf_report_config(const char *var, const char *value, void *cb)
{
if (!strcmp(var, "report.group")) {
symbol_conf.event_group = perf_config_bool(var, value);
return 0;
}
return perf_default_config(var, value, cb);
}
static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
struct addr_location *al,
struct perf_sample *sample,
@ -299,6 +310,21 @@ static size_t hists__fprintf_nr_sample_events(struct hists *self,
char unit;
unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
u64 nr_events = self->stats.total_period;
struct perf_evsel *evsel = hists_to_evsel(self);
char buf[512];
size_t size = sizeof(buf);
if (symbol_conf.event_group && evsel->nr_members > 1) {
struct perf_evsel *pos;
perf_evsel__group_desc(evsel, buf, size);
evname = buf;
for_each_group_member(pos, evsel) {
nr_samples += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE];
nr_events += pos->hists.stats.total_period;
}
}
nr_samples = convert_unit(nr_samples, &unit);
ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
@ -319,6 +345,10 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
struct hists *hists = &pos->hists;
const char *evname = perf_evsel__name(pos);
if (symbol_conf.event_group &&
!perf_evsel__is_group_leader(pos))
continue;
hists__fprintf_nr_sample_events(hists, evname, stdout);
hists__fprintf(hists, true, 0, 0, stdout);
fprintf(stdout, "\n\n");
@ -416,8 +446,16 @@ static int __cmd_report(struct perf_report *rep)
hists->symbol_filter_str = rep->symbol_filter_str;
hists__collapse_resort(hists);
hists__output_resort(hists);
nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE];
/* Non-group events are considered as leader */
if (symbol_conf.event_group &&
!perf_evsel__is_group_leader(pos)) {
struct hists *leader_hists = &pos->leader->hists;
hists__match(leader_hists, hists);
hists__link(leader_hists, hists);
}
}
if (nr_samples == 0) {
@ -425,6 +463,9 @@ static int __cmd_report(struct perf_report *rep)
goto out_delete;
}
list_for_each_entry(pos, &session->evlist->entries, node)
hists__output_resort(&pos->hists);
if (use_browser > 0) {
if (use_browser == 1) {
perf_evlist__tui_browse_hists(session->evlist, help,
@ -638,6 +679,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
"Show a column with the sum of periods"),
OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
"Show event group information together"),
OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "",
"use branch records for histogram filling", parse_branch_mode),
OPT_STRING(0, "objdump", &objdump_path, "path",
@ -645,6 +688,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_END()
};
perf_config(perf_report_config, NULL);
argc = parse_options(argc, argv, options, report_usage, 0);
if (report.use_stdio)


@ -68,6 +68,8 @@
#include <linux/unistd.h>
#include <linux/types.h>
static volatile int done;
static void perf_top__update_print_entries(struct perf_top *top)
{
if (top->print_entries > 9)
@ -431,8 +433,10 @@ static int perf_top__key_mapped(struct perf_top *top, int c)
return 0;
}
static void perf_top__handle_keypress(struct perf_top *top, int c)
static bool perf_top__handle_keypress(struct perf_top *top, int c)
{
bool ret = true;
if (!perf_top__key_mapped(top, c)) {
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
struct termios tc, save;
@ -453,7 +457,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
tcsetattr(0, TCSAFLUSH, &save);
if (!perf_top__key_mapped(top, c))
return;
return ret;
}
switch (c) {
@ -515,7 +519,8 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
printf("exiting.\n");
if (top->dump_symtab)
perf_session__fprintf_dsos(top->session, stderr);
exit(0);
ret = false;
break;
case 's':
perf_top__prompt_symbol(top, "Enter details symbol");
break;
@ -538,6 +543,8 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
default:
break;
}
return ret;
}
static void perf_top__sort_new_samples(void *arg)
@ -579,8 +586,7 @@ static void *display_thread_tui(void *arg)
perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
&top->session->header.env);
exit_browser(0);
exit(0);
done = 1;
return NULL;
}
@ -604,7 +610,7 @@ repeat:
/* trash return*/
getc(stdin);
while (1) {
while (!done) {
perf_top__print_sym_table(top);
/*
* Either timeout expired or we got an EINTR due to SIGWINCH,
@ -618,15 +624,14 @@ repeat:
continue;
/* Fall trhu */
default:
goto process_hotkey;
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
if (perf_top__handle_keypress(top, c))
goto repeat;
done = 1;
}
}
process_hotkey:
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
perf_top__handle_keypress(top, c);
goto repeat;
return NULL;
}
@ -705,7 +710,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
}
if (!machine) {
pr_err("%u unprocessable samples recorded.\n",
pr_err("%u unprocessable samples recorded.\r",
top->session->stats.nr_unprocessable_samples++);
return;
}
@ -868,7 +873,7 @@ static void perf_top__mmap_read(struct perf_top *top)
perf_top__mmap_read_idx(top, i);
}
static void perf_top__start_counters(struct perf_top *top)
static int perf_top__start_counters(struct perf_top *top)
{
char msg[512];
struct perf_evsel *counter;
@ -900,11 +905,10 @@ try_again:
goto out_err;
}
return;
return 0;
out_err:
exit_browser(0);
exit(0);
return -1;
}
static int perf_top__setup_sample_type(struct perf_top *top)
@ -948,7 +952,11 @@ static int __cmd_top(struct perf_top *top)
else
perf_event__synthesize_threads(&top->tool, perf_event__process,
&top->session->machines.host);
perf_top__start_counters(top);
ret = perf_top__start_counters(top);
if (ret)
goto out_delete;
top->session->evlist = top->evlist;
perf_session__set_id_hdr_size(top->session);
@ -968,10 +976,11 @@ static int __cmd_top(struct perf_top *top)
perf_top__mmap_read(top);
ret = -1;
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
display_thread), top)) {
ui__error("Could not create display thread.\n");
exit(-1);
goto out_delete;
}
if (top->realtime_prio) {
@ -980,11 +989,11 @@ static int __cmd_top(struct perf_top *top)
param.sched_priority = top->realtime_prio;
if (sched_setscheduler(0, SCHED_FIFO, &param)) {
ui__error("Could not set realtime priority.\n");
exit(-1);
goto out_delete;
}
}
while (1) {
while (!done) {
u64 hits = top->samples;
perf_top__mmap_read(top);
@ -993,11 +1002,12 @@ static int __cmd_top(struct perf_top *top)
ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
}
ret = 0;
out_delete:
perf_session__delete(top->session);
top->session = NULL;
return 0;
return ret;
}
static int
@ -1154,7 +1164,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
if (!top.evlist->nr_entries &&
perf_evlist__add_default(top.evlist) < 0) {
ui__error("Not enough memory for event selector list\n");
return -ENOMEM;
goto out_delete_maps;
}
symbol_conf.nr_events = top.evlist->nr_entries;
@ -1177,7 +1187,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
} else {
ui__error("frequency and count are zero, aborting\n");
status = -EINVAL;
goto out_delete_evlist;
goto out_delete_maps;
}
top.sym_evsel = perf_evlist__first(top.evlist);
@ -1210,6 +1220,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
status = __cmd_top(&top);
out_delete_maps:
perf_evlist__delete_maps(top.evlist);
out_delete_evlist:
perf_evlist__delete(top.evlist);


@ -23,6 +23,7 @@ static int test__checkevent_tracepoint(struct perf_evlist *evlist)
struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong sample_type",
PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
@ -35,6 +36,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
struct perf_evsel *evsel;
TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups);
list_for_each_entry(evsel, &evlist->entries, node) {
TEST_ASSERT_VAL("wrong type",
@ -510,6 +512,7 @@ static int test__group1(struct perf_evlist *evlist)
struct perf_evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
/* instructions:k */
evsel = leader = perf_evlist__first(evlist);
@ -523,6 +526,8 @@ static int test__group1(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
/* cycles:upp */
evsel = perf_evsel__next(evsel);
@ -537,6 +542,7 @@ static int test__group1(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
return 0;
}
@ -546,6 +552,7 @@ static int test__group2(struct perf_evlist *evlist)
struct perf_evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
/* faults + :ku modifier */
evsel = leader = perf_evlist__first(evlist);
@ -559,6 +566,8 @@ static int test__group2(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
/* cache-references + :u modifier */
evsel = perf_evsel__next(evsel);
@ -572,6 +581,7 @@ static int test__group2(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
/* cycles:k */
evsel = perf_evsel__next(evsel);
@ -594,6 +604,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
struct perf_evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups);
/* group1 syscalls:sys_enter_open:H */
evsel = leader = perf_evlist__first(evlist);
@ -610,6 +621,8 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong group name",
!strcmp(leader->group_name, "group1"));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
/* group1 cycles:kppp */
evsel = perf_evsel__next(evsel);
@ -625,6 +638,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
/* group2 cycles + G modifier */
evsel = leader = perf_evsel__next(evsel);
@ -640,6 +654,8 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong group name",
!strcmp(leader->group_name, "group2"));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
/* group2 1:3 + G modifier */
evsel = perf_evsel__next(evsel);
@ -652,6 +668,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
/* instructions:u */
evsel = perf_evsel__next(evsel);
@ -674,6 +691,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
struct perf_evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
/* cycles:u + p */
evsel = leader = perf_evlist__first(evlist);
@ -689,6 +707,8 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
/* instructions:kp + p */
evsel = perf_evsel__next(evsel);
@ -703,6 +723,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
return 0;
}
@ -712,6 +733,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
struct perf_evsel *evsel, *leader;
TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups);
/* cycles + G */
evsel = leader = perf_evlist__first(evlist);
@ -726,6 +748,8 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
/* instructions + G */
evsel = perf_evsel__next(evsel);
@ -739,6 +763,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
/* cycles:G */
evsel = leader = perf_evsel__next(evsel);
@ -753,6 +778,8 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
/* instructions:G */
evsel = perf_evsel__next(evsel);
@ -766,6 +793,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
/* cycles */
evsel = perf_evsel__next(evsel);


@ -567,23 +567,123 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
return row - first_row;
}
#define HPP__COLOR_FN(_name, _field) \
static int hist_browser__hpp_color_ ## _name(struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
struct hists *hists = he->hists; \
double percent = 100.0 * he->stat._field / hists->stats.total_period; \
*(double *)hpp->ptr = percent; \
return scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent); \
struct hpp_arg {
struct ui_browser *b;
char folded_sign;
bool current_entry;
};
static int __hpp__color_callchain(struct hpp_arg *arg)
{
if (!symbol_conf.use_callchain)
return 0;
slsmg_printf("%c ", arg->folded_sign);
return 2;
}
HPP__COLOR_FN(overhead, period)
HPP__COLOR_FN(overhead_sys, period_sys)
HPP__COLOR_FN(overhead_us, period_us)
HPP__COLOR_FN(overhead_guest_sys, period_guest_sys)
HPP__COLOR_FN(overhead_guest_us, period_guest_us)
static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
u64 (*get_field)(struct hist_entry *),
int (*callchain_cb)(struct hpp_arg *))
{
int ret = 0;
double percent = 0.0;
struct hists *hists = he->hists;
struct hpp_arg *arg = hpp->ptr;
#undef HPP__COLOR_FN
if (hists->stats.total_period)
percent = 100.0 * get_field(he) / hists->stats.total_period;
ui_browser__set_percent_color(arg->b, percent, arg->current_entry);
if (callchain_cb)
ret += callchain_cb(arg);
ret += scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
slsmg_printf("%s", hpp->buf);
if (symbol_conf.event_group) {
int prev_idx, idx_delta;
struct perf_evsel *evsel = hists_to_evsel(hists);
struct hist_entry *pair;
int nr_members = evsel->nr_members;
if (nr_members <= 1)
goto out;
prev_idx = perf_evsel__group_idx(evsel);
list_for_each_entry(pair, &he->pairs.head, pairs.node) {
u64 period = get_field(pair);
u64 total = pair->hists->stats.total_period;
if (!total)
continue;
evsel = hists_to_evsel(pair->hists);
idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
while (idx_delta--) {
/*
* zero-fill group members in the middle which
* have no sample
*/
ui_browser__set_percent_color(arg->b, 0.0,
arg->current_entry);
ret += scnprintf(hpp->buf, hpp->size,
" %6.2f%%", 0.0);
slsmg_printf("%s", hpp->buf);
}
percent = 100.0 * period / total;
ui_browser__set_percent_color(arg->b, percent,
arg->current_entry);
ret += scnprintf(hpp->buf, hpp->size,
" %6.2f%%", percent);
slsmg_printf("%s", hpp->buf);
prev_idx = perf_evsel__group_idx(evsel);
}
idx_delta = nr_members - prev_idx - 1;
while (idx_delta--) {
/*
* zero-fill group members at last which have no sample
*/
ui_browser__set_percent_color(arg->b, 0.0,
arg->current_entry);
ret += scnprintf(hpp->buf, hpp->size,
" %6.2f%%", 0.0);
slsmg_printf("%s", hpp->buf);
}
}
out:
if (!arg->current_entry || !arg->b->navkeypressed)
ui_browser__set_color(arg->b, HE_COLORSET_NORMAL);
return ret;
}
#define __HPP_COLOR_PERCENT_FN(_type, _field, _cb) \
static u64 __hpp_get_##_field(struct hist_entry *he) \
{ \
return he->stat._field; \
} \
\
static int hist_browser__hpp_color_##_type(struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
return __hpp__color_fmt(hpp, he, __hpp_get_##_field, _cb); \
}
__HPP_COLOR_PERCENT_FN(overhead, period, __hpp__color_callchain)
__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys, NULL)
__HPP_COLOR_PERCENT_FN(overhead_us, period_us, NULL)
__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys, NULL)
__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, NULL)
#undef __HPP_COLOR_PERCENT_FN
void hist_browser__init_hpp(void)
{
@ -608,7 +708,6 @@ static int hist_browser__show_entry(struct hist_browser *browser,
unsigned short row)
{
char s[256];
double percent;
int printed = 0;
int width = browser->b.width;
char folded_sign = ' ';
@ -628,16 +727,20 @@ static int hist_browser__show_entry(struct hist_browser *browser,
}
if (row_offset == 0) {
struct hpp_arg arg = {
.b = &browser->b,
.folded_sign = folded_sign,
.current_entry = current_entry,
};
struct perf_hpp hpp = {
.buf = s,
.size = sizeof(s),
.ptr = &arg,
};
int i = 0;
ui_browser__gotorc(&browser->b, row, 0);
perf_hpp__for_each_format(fmt) {
if (!first) {
slsmg_printf(" ");
width -= 2;
@ -645,27 +748,11 @@ static int hist_browser__show_entry(struct hist_browser *browser,
first = false;
if (fmt->color) {
hpp.ptr = &percent;
/* It will set percent for us. See HPP__COLOR_FN above. */
width -= fmt->color(&hpp, entry);
ui_browser__set_percent_color(&browser->b, percent, current_entry);
if (!i && symbol_conf.use_callchain) {
slsmg_printf("%c ", folded_sign);
width -= 2;
}
slsmg_printf("%s", s);
if (!current_entry || !browser->b.navkeypressed)
ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
} else {
width -= fmt->entry(&hpp, entry);
slsmg_printf("%s", s);
}
i++;
}
/* The scroll bar isn't being used */
@ -1102,6 +1189,21 @@ static int hists__browser_title(struct hists *hists, char *bf, size_t size,
const struct thread *thread = hists->thread_filter;
unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
u64 nr_events = hists->stats.total_period;
struct perf_evsel *evsel = hists_to_evsel(hists);
char buf[512];
size_t buflen = sizeof(buf);
if (symbol_conf.event_group && evsel->nr_members > 1) {
struct perf_evsel *pos;
perf_evsel__group_desc(evsel, buf, buflen);
ev_name = buf;
for_each_group_member(pos, evsel) {
nr_samples += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE];
nr_events += pos->hists.stats.total_period;
}
}
nr_samples = convert_unit(nr_samples, &unit);
printed = scnprintf(bf, size,
@ -1498,6 +1600,16 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
HE_COLORSET_NORMAL);
if (symbol_conf.event_group && evsel->nr_members > 1) {
struct perf_evsel *pos;
ev_name = perf_evsel__group_name(evsel);
for_each_group_member(pos, evsel) {
nr_events += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE];
}
}
nr_events = convert_unit(nr_events, &unit);
printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
unit, unit == ' ' ? "" : " ", ev_name);
@ -1608,8 +1720,19 @@ out:
return key;
}
static bool filter_group_entries(struct ui_browser *self __maybe_unused,
void *entry)
{
struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node);
if (symbol_conf.event_group && !perf_evsel__is_group_leader(evsel))
return true;
return false;
}
static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
const char *help,
int nr_entries, const char *help,
struct hist_browser_timer *hbt,
struct perf_session_env *env)
{
@ -1620,7 +1743,8 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
.refresh = ui_browser__list_head_refresh,
.seek = ui_browser__list_head_seek,
.write = perf_evsel_menu__write,
.nr_entries = evlist->nr_entries,
.filter = filter_group_entries,
.nr_entries = nr_entries,
.priv = evlist,
},
.env = env,
@ -1636,20 +1760,37 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
menu.b.width = line_len;
}
return perf_evsel_menu__run(&menu, evlist->nr_entries, help, hbt);
return perf_evsel_menu__run(&menu, nr_entries, help, hbt);
}
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
struct hist_browser_timer *hbt,
struct perf_session_env *env)
{
if (evlist->nr_entries == 1) {
int nr_entries = evlist->nr_entries;
single_entry:
if (nr_entries == 1) {
struct perf_evsel *first = list_entry(evlist->entries.next,
struct perf_evsel, node);
const char *ev_name = perf_evsel__name(first);
return perf_evsel__hists_browse(first, evlist->nr_entries, help,
return perf_evsel__hists_browse(first, nr_entries, help,
ev_name, false, hbt, env);
}
return __perf_evlist__tui_browse_hists(evlist, help, hbt, env);
if (symbol_conf.event_group) {
struct perf_evsel *pos;
nr_entries = 0;
list_for_each_entry(pos, &evlist->entries, node)
if (perf_evsel__is_group_leader(pos))
nr_entries++;
if (nr_entries == 1)
goto single_entry;
}
return __perf_evlist__tui_browse_hists(evlist, nr_entries, help,
hbt, env);
}


@ -8,32 +8,105 @@
#define MAX_COLUMNS 32
#define HPP__COLOR_FN(_name, _field) \
static int perf_gtk__hpp_color_ ## _name(struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
struct hists *hists = he->hists; \
double percent = 100.0 * he->stat._field / hists->stats.total_period; \
const char *markup; \
int ret = 0; \
\
markup = perf_gtk__get_percent_color(percent); \
if (markup) \
ret += scnprintf(hpp->buf, hpp->size, "%s", markup); \
ret += scnprintf(hpp->buf + ret, hpp->size - ret, "%6.2f%%", percent); \
if (markup) \
ret += scnprintf(hpp->buf + ret, hpp->size - ret, "</span>"); \
\
return ret; \
static int __percent_color_snprintf(char *buf, size_t size, double percent)
{
int ret = 0;
const char *markup;
markup = perf_gtk__get_percent_color(percent);
if (markup)
ret += scnprintf(buf, size, markup);
ret += scnprintf(buf + ret, size - ret, " %6.2f%%", percent);
if (markup)
ret += scnprintf(buf + ret, size - ret, "</span>");
return ret;
}
HPP__COLOR_FN(overhead, period)
HPP__COLOR_FN(overhead_sys, period_sys)
HPP__COLOR_FN(overhead_us, period_us)
HPP__COLOR_FN(overhead_guest_sys, period_guest_sys)
HPP__COLOR_FN(overhead_guest_us, period_guest_us)
#undef HPP__COLOR_FN
static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
u64 (*get_field)(struct hist_entry *))
{
int ret;
double percent = 0.0;
struct hists *hists = he->hists;
if (hists->stats.total_period)
percent = 100.0 * get_field(he) / hists->stats.total_period;
ret = __percent_color_snprintf(hpp->buf, hpp->size, percent);
if (symbol_conf.event_group) {
int prev_idx, idx_delta;
struct perf_evsel *evsel = hists_to_evsel(hists);
struct hist_entry *pair;
int nr_members = evsel->nr_members;
if (nr_members <= 1)
return ret;
prev_idx = perf_evsel__group_idx(evsel);
list_for_each_entry(pair, &he->pairs.head, pairs.node) {
u64 period = get_field(pair);
u64 total = pair->hists->stats.total_period;
evsel = hists_to_evsel(pair->hists);
idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
while (idx_delta--) {
/*
* zero-fill group members in the middle which
* have no sample
*/
ret += __percent_color_snprintf(hpp->buf + ret,
hpp->size - ret,
0.0);
}
percent = 100.0 * period / total;
ret += __percent_color_snprintf(hpp->buf + ret,
hpp->size - ret,
percent);
prev_idx = perf_evsel__group_idx(evsel);
}
idx_delta = nr_members - prev_idx - 1;
while (idx_delta--) {
/*
* zero-fill group members at last which have no sample
*/
ret += __percent_color_snprintf(hpp->buf + ret,
hpp->size - ret,
0.0);
}
}
return ret;
}
#define __HPP_COLOR_PERCENT_FN(_type, _field) \
static u64 he_get_##_field(struct hist_entry *he) \
{ \
return he->stat._field; \
} \
\
static int perf_gtk__hpp_color_##_type(struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
return __hpp__color_fmt(hpp, he, he_get_##_field); \
}
__HPP_COLOR_PERCENT_FN(overhead, period)
__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys)
__HPP_COLOR_PERCENT_FN(overhead_us, period_us)
__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys)
__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us)
#undef __HPP_COLOR_PERCENT_FN
void perf_gtk__init_hpp(void)
@ -70,6 +143,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
struct perf_hpp hpp = {
.buf = s,
.size = sizeof(s),
.ptr = hists_to_evsel(hists),
};
nr_cols = 0;
@ -96,7 +170,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
fmt->header(&hpp);
gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
-1, s,
-1, ltrim(s),
renderer, "markup",
col_idx++, NULL);
}
@ -196,6 +270,18 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
const char *evname = perf_evsel__name(pos);
GtkWidget *scrolled_window;
GtkWidget *tab_label;
char buf[512];
size_t size = sizeof(buf);
if (symbol_conf.event_group) {
if (!perf_evsel__is_group_leader(pos))
continue;
if (pos->nr_members > 1) {
perf_evsel__group_desc(pos, buf, size);
evname = buf;
}
}
scrolled_window = gtk_scrolled_window_new(NULL, NULL);


@ -3,151 +3,164 @@
#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"
/* hist period print (hpp) functions */
static int hpp__header_overhead(struct perf_hpp *hpp)
{
return scnprintf(hpp->buf, hpp->size, "Overhead");
}
static int hpp__width_overhead(struct perf_hpp *hpp __maybe_unused)
{
return 8;
}
typedef int (*hpp_snprint_fn)(char *buf, size_t size, const char *fmt, ...);
static int hpp__color_overhead(struct perf_hpp *hpp, struct hist_entry *he)
static int __hpp__percent_fmt(struct perf_hpp *hpp, struct hist_entry *he,
u64 (*get_field)(struct hist_entry *),
const char *fmt, hpp_snprint_fn print_fn)
{
int ret;
double percent = 0.0;
struct hists *hists = he->hists;
double percent = 100.0 * he->stat.period / hists->stats.total_period;
return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent);
if (hists->stats.total_period)
percent = 100.0 * get_field(he) / hists->stats.total_period;
ret = print_fn(hpp->buf, hpp->size, fmt, percent);
if (symbol_conf.event_group) {
int prev_idx, idx_delta;
struct perf_evsel *evsel = hists_to_evsel(hists);
struct hist_entry *pair;
int nr_members = evsel->nr_members;
if (nr_members <= 1)
return ret;
prev_idx = perf_evsel__group_idx(evsel);
list_for_each_entry(pair, &he->pairs.head, pairs.node) {
u64 period = get_field(pair);
u64 total = pair->hists->stats.total_period;
if (!total)
continue;
evsel = hists_to_evsel(pair->hists);
idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
while (idx_delta--) {
/*
* zero-fill group members in the middle which
* have no sample
*/
ret += print_fn(hpp->buf + ret, hpp->size - ret,
fmt, 0.0);
}
ret += print_fn(hpp->buf + ret, hpp->size - ret,
fmt, 100.0 * period / total);
prev_idx = perf_evsel__group_idx(evsel);
}
idx_delta = nr_members - prev_idx - 1;
while (idx_delta--) {
/*
* zero-fill group members at last which have no sample
*/
ret += print_fn(hpp->buf + ret, hpp->size - ret,
fmt, 0.0);
}
}
return ret;
}
static int hpp__entry_overhead(struct perf_hpp *hpp, struct hist_entry *he)
static int __hpp__raw_fmt(struct perf_hpp *hpp, struct hist_entry *he,
u64 (*get_field)(struct hist_entry *),
const char *fmt, hpp_snprint_fn print_fn)
{
struct hists *hists = he->hists;
double percent = 100.0 * he->stat.period / hists->stats.total_period;
const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%%";
int ret;
return scnprintf(hpp->buf, hpp->size, fmt, percent);
ret = print_fn(hpp->buf, hpp->size, fmt, get_field(he));
return ret;
}
static int hpp__header_overhead_sys(struct perf_hpp *hpp)
{
const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
return scnprintf(hpp->buf, hpp->size, fmt, "sys");
#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
static int hpp__header_##_type(struct perf_hpp *hpp) \
{ \
int len = _min_width; \
\
if (symbol_conf.event_group) { \
struct perf_evsel *evsel = hpp->ptr; \
\
len = max(len, evsel->nr_members * _unit_width); \
} \
return scnprintf(hpp->buf, hpp->size, "%*s", len, _str); \
}
static int hpp__width_overhead_sys(struct perf_hpp *hpp __maybe_unused)
{
return 7;
#define __HPP_WIDTH_FN(_type, _min_width, _unit_width) \
static int hpp__width_##_type(struct perf_hpp *hpp __maybe_unused) \
{ \
int len = _min_width; \
\
if (symbol_conf.event_group) { \
struct perf_evsel *evsel = hpp->ptr; \
\
len = max(len, evsel->nr_members * _unit_width); \
} \
return len; \
}
static int hpp__color_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he)
{
struct hists *hists = he->hists;
double percent = 100.0 * he->stat.period_sys / hists->stats.total_period;
return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
#define __HPP_COLOR_PERCENT_FN(_type, _field) \
static u64 he_get_##_field(struct hist_entry *he) \
{ \
return he->stat._field; \
} \
\
static int hpp__color_##_type(struct perf_hpp *hpp, struct hist_entry *he) \
{ \
return __hpp__percent_fmt(hpp, he, he_get_##_field, " %6.2f%%", \
(hpp_snprint_fn)percent_color_snprintf); \
}
static int hpp__entry_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he)
{
struct hists *hists = he->hists;
double percent = 100.0 * he->stat.period_sys / hists->stats.total_period;
const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%";
return scnprintf(hpp->buf, hpp->size, fmt, percent);
#define __HPP_ENTRY_PERCENT_FN(_type, _field) \
static int hpp__entry_##_type(struct perf_hpp *hpp, struct hist_entry *he) \
{ \
const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \
return __hpp__percent_fmt(hpp, he, he_get_##_field, fmt, \
scnprintf); \
}
static int hpp__header_overhead_us(struct perf_hpp *hpp)
{
const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
return scnprintf(hpp->buf, hpp->size, fmt, "user");
#define __HPP_ENTRY_RAW_FN(_type, _field) \
static u64 he_get_raw_##_field(struct hist_entry *he) \
{ \
return he->stat._field; \
} \
\
static int hpp__entry_##_type(struct perf_hpp *hpp, struct hist_entry *he) \
{ \
const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64; \
return __hpp__raw_fmt(hpp, he, he_get_raw_##_field, fmt, scnprintf); \
}
static int hpp__width_overhead_us(struct perf_hpp *hpp __maybe_unused)
{
return 7;
}
#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width) \
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
__HPP_WIDTH_FN(_type, _min_width, _unit_width) \
__HPP_COLOR_PERCENT_FN(_type, _field) \
__HPP_ENTRY_PERCENT_FN(_type, _field)
static int hpp__color_overhead_us(struct perf_hpp *hpp, struct hist_entry *he)
{
struct hists *hists = he->hists;
double percent = 100.0 * he->stat.period_us / hists->stats.total_period;
#define HPP_RAW_FNS(_type, _str, _field, _min_width, _unit_width) \
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
__HPP_WIDTH_FN(_type, _min_width, _unit_width) \
__HPP_ENTRY_RAW_FN(_type, _field)
return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
}
static int hpp__entry_overhead_us(struct perf_hpp *hpp, struct hist_entry *he)
{
struct hists *hists = he->hists;
double percent = 100.0 * he->stat.period_us / hists->stats.total_period;
const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%";
HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8)
HPP_PERCENT_FNS(overhead_sys, "sys", period_sys, 8, 8)
HPP_PERCENT_FNS(overhead_us, "usr", period_us, 8, 8)
HPP_PERCENT_FNS(overhead_guest_sys, "guest sys", period_guest_sys, 9, 8)
HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8)
return scnprintf(hpp->buf, hpp->size, fmt, percent);
}
HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12)
HPP_RAW_FNS(period, "Period", period, 12, 12)
static int hpp__header_overhead_guest_sys(struct perf_hpp *hpp)
{
return scnprintf(hpp->buf, hpp->size, "guest sys");
}
static int hpp__width_overhead_guest_sys(struct perf_hpp *hpp __maybe_unused)
{
return 9;
}
static int hpp__color_overhead_guest_sys(struct perf_hpp *hpp,
struct hist_entry *he)
{
struct hists *hists = he->hists;
double percent = 100.0 * he->stat.period_guest_sys / hists->stats.total_period;
return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent);
}
static int hpp__entry_overhead_guest_sys(struct perf_hpp *hpp,
struct hist_entry *he)
{
struct hists *hists = he->hists;
double percent = 100.0 * he->stat.period_guest_sys / hists->stats.total_period;
const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% ";
return scnprintf(hpp->buf, hpp->size, fmt, percent);
}
static int hpp__header_overhead_guest_us(struct perf_hpp *hpp)
{
return scnprintf(hpp->buf, hpp->size, "guest usr");
}
static int hpp__width_overhead_guest_us(struct perf_hpp *hpp __maybe_unused)
{
return 9;
}
static int hpp__color_overhead_guest_us(struct perf_hpp *hpp,
struct hist_entry *he)
{
struct hists *hists = he->hists;
double percent = 100.0 * he->stat.period_guest_us / hists->stats.total_period;
return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent);
}
static int hpp__entry_overhead_guest_us(struct perf_hpp *hpp,
struct hist_entry *he)
{
struct hists *hists = he->hists;
double percent = 100.0 * he->stat.period_guest_us / hists->stats.total_period;
const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% ";
return scnprintf(hpp->buf, hpp->size, fmt, percent);
}
static int hpp__header_baseline(struct perf_hpp *hpp)
{
@ -179,7 +192,7 @@ static int hpp__color_baseline(struct perf_hpp *hpp, struct hist_entry *he)
{
double percent = baseline_percent(he);
if (hist_entry__has_pairs(he))
if (hist_entry__has_pairs(he) || symbol_conf.field_sep)
return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent);
else
return scnprintf(hpp->buf, hpp->size, " ");
@ -196,44 +209,6 @@ static int hpp__entry_baseline(struct perf_hpp *hpp, struct hist_entry *he)
return scnprintf(hpp->buf, hpp->size, " ");
}
static int hpp__header_samples(struct perf_hpp *hpp)
{
const char *fmt = symbol_conf.field_sep ? "%s" : "%11s";
return scnprintf(hpp->buf, hpp->size, fmt, "Samples");
}
static int hpp__width_samples(struct perf_hpp *hpp __maybe_unused)
{
return 11;
}
static int hpp__entry_samples(struct perf_hpp *hpp, struct hist_entry *he)
{
const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%11" PRIu64;
return scnprintf(hpp->buf, hpp->size, fmt, he->stat.nr_events);
}
static int hpp__header_period(struct perf_hpp *hpp)
{
const char *fmt = symbol_conf.field_sep ? "%s" : "%12s";
return scnprintf(hpp->buf, hpp->size, fmt, "Period");
}
static int hpp__width_period(struct perf_hpp *hpp __maybe_unused)
{
return 12;
}
static int hpp__entry_period(struct perf_hpp *hpp, struct hist_entry *he)
{
const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%12" PRIu64;
return scnprintf(hpp->buf, hpp->size, fmt, he->stat.period);
}
static int hpp__header_period_baseline(struct perf_hpp *hpp)
{
const char *fmt = symbol_conf.field_sep ? "%s" : "%12s";
@@ -254,6 +229,7 @@ static int hpp__entry_period_baseline(struct perf_hpp *hpp, struct hist_entry *he)
return scnprintf(hpp->buf, hpp->size, fmt, period);
}
static int hpp__header_delta(struct perf_hpp *hpp)
{
const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
@@ -408,9 +384,20 @@ struct perf_hpp_fmt perf_hpp__format[] = {
LIST_HEAD(perf_hpp__list);
#undef HPP__COLOR_PRINT_FNS
#undef HPP__PRINT_FNS
#undef HPP_PERCENT_FNS
#undef HPP_RAW_FNS
#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
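HPP_PERCENT_FNS itself is not visible in the hunks above; judging from the helpers being #undef'ed here, it presumably composes the percent-formatting variants the same way HPP_RAW_FNS composes the raw ones, roughly (a sketch, not the patch's literal definition):

#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width)	\
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width)			\
__HPP_WIDTH_FN(_type, _min_width, _unit_width)				\
__HPP_COLOR_PERCENT_FN(_type, _field)					\
__HPP_ENTRY_PERCENT_FN(_type, _field)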
void perf_hpp__init(void)
{
if (symbol_conf.show_cpu_utilization) {
@@ -508,12 +495,15 @@ unsigned int hists__sort_list_width(struct hists *hists)
struct perf_hpp_fmt *fmt;
struct sort_entry *se;
int i = 0, ret = 0;
struct perf_hpp dummy_hpp = {
.ptr = hists_to_evsel(hists),
};
perf_hpp__for_each_format(fmt) {
if (i)
ret += 2;
-	ret += fmt->width(NULL);
+	ret += fmt->width(&dummy_hpp);
}
list_for_each_entry(se, &hist_entry__sort_list, list)

tools/perf/ui/stdio/hist.c

@@ -3,6 +3,7 @@
#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
@@ -347,6 +348,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
struct perf_hpp dummy_hpp = {
.buf = bf,
.size = sizeof(bf),
.ptr = hists_to_evsel(hists),
};
bool first = true;

tools/perf/util/evlist.c

@@ -117,6 +117,9 @@ void __perf_evlist__set_leader(struct list_head *list)
struct perf_evsel *evsel, *leader;
leader = list_entry(list->next, struct perf_evsel, node);
evsel = list_entry(list->prev, struct perf_evsel, node);
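/* events in a group sit contiguously with consecutive idx values, so the idx span gives the group size */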
leader->nr_members = evsel->idx - leader->idx + 1;
list_for_each_entry(evsel, list, node) {
if (evsel != leader)
@@ -126,8 +129,10 @@ void __perf_evlist__set_leader(struct list_head *list)
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
-	if (evlist->nr_entries)
+	if (evlist->nr_entries) {
evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
__perf_evlist__set_leader(&evlist->entries);
}
}
int perf_evlist__add_default(struct perf_evlist *evlist)

tools/perf/util/evlist.h

@@ -21,6 +21,7 @@ struct perf_evlist {
struct list_head entries;
struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
int nr_entries;
int nr_groups;
int nr_fds;
int nr_mmaps;
int mmap_len;

tools/perf/util/evsel.c

@@ -434,6 +434,31 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
return evsel->name ?: "unknown";
}
const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
return evsel->group_name ?: "anon group";
}
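/*
 * Render a group as "<name> { member1, member2, ... }"; unnamed groups get
 * the "anon group" placeholder from perf_evsel__group_name().
 */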
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
int ret;
struct perf_evsel *pos;
const char *group_name = perf_evsel__group_name(evsel);
ret = scnprintf(buf, size, "%s", group_name);
ret += scnprintf(buf + ret, size - ret, " { %s",
perf_evsel__name(evsel));
for_each_group_member(pos, evsel)
ret += scnprintf(buf + ret, size - ret, ", %s",
perf_evsel__name(pos));
ret += scnprintf(buf + ret, size - ret, " }");
return ret;
}
/*
* The enable_on_exec/disabled value strategy:
*
@@ -1364,7 +1389,27 @@ int perf_evsel__fprintf(struct perf_evsel *evsel,
struct perf_attr_details *details, FILE *fp)
{
bool first = true;
-	int printed = fprintf(fp, "%s", perf_evsel__name(evsel));
+	int printed = 0;
if (symbol_conf.event_group) {
struct perf_evsel *pos;
if (!perf_evsel__is_group_leader(evsel))
return 0;
if (evsel->nr_members > 1)
printed += fprintf(fp, "%s{", evsel->group_name ?: "");
printed += fprintf(fp, "%s", perf_evsel__name(evsel));
for_each_group_member(pos, evsel)
printed += fprintf(fp, ",%s", perf_evsel__name(pos));
if (evsel->nr_members > 1)
printed += fprintf(fp, "}");
goto out;
}
printed += fprintf(fp, "%s", perf_evsel__name(evsel));
if (details->verbose || details->freq) {
printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
@@ -1405,7 +1450,7 @@ int perf_evsel__fprintf(struct perf_evsel *evsel,
if_print(bp_type);
if_print(branch_sample_type);
}
out:
fputc('\n', fp);
return ++printed;
}

tools/perf/util/evsel.h

@@ -74,10 +74,13 @@ struct perf_evsel {
bool needs_swap;
/* parse modifier helper */
int exclude_GH;
int nr_members;
struct perf_evsel *leader;
char *group_name;
};
#define hists_to_evsel(h) container_of(h, struct perf_evsel, hists)
struct cpu_map;
struct thread_map;
struct perf_evlist;
@@ -111,6 +114,8 @@ extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
char *bf, size_t size);
const char *perf_evsel__name(struct perf_evsel *evsel);
const char *perf_evsel__group_name(struct perf_evsel *evsel);
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
@@ -259,4 +264,15 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
int perf_evsel__open_strerror(struct perf_evsel *evsel,
struct perf_target *target,
int err, char *msg, size_t size);
static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
{
return evsel->idx - evsel->leader->idx;
}
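/*
 * Iterate over the members of a group, excluding the leader itself.
 * Relies on members being linked right after their leader on the evlist
 * and pointing back to it via ->leader.
 */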
#define for_each_group_member(_evsel, _leader) \
for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \
(_evsel) && (_evsel)->leader == (_leader); \
(_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
#endif /* __PERF_EVSEL_H */
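To make the intended use of the two new helpers concrete, here is a minimal sketch (illustration only, not part of the patch; collect_group_periods() is a hypothetical function and it assumes the per-evsel hists/stats layout used elsewhere in this series) that gathers one value per group member into an array indexed by group position:

/* Hypothetical example: one slot per group member, the leader in slot 0. */
static void collect_group_periods(struct perf_evsel *leader, u64 *periods)
{
	struct perf_evsel *pos;

	/* slot 0 belongs to the leader itself */
	periods[0] = leader->hists.stats.total_period;

	/* the remaining members land in slots 1 .. nr_members - 1 */
	for_each_group_member(pos, leader)
		periods[perf_evsel__group_idx(pos)] = pos->hists.stats.total_period;
}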

tools/perf/util/header.c

@@ -1084,6 +1084,52 @@ static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
return 0;
}
/*
* File format:
*
* struct group_descs {
* u32 nr_groups;
* struct group_desc {
* char name[];
* u32 leader_idx;
* u32 nr_members;
* }[nr_groups];
* };
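*
* Example: an anonymous two-event group whose leader is the first event
* in the file would be written as nr_groups = 1, name = "{anon_group}",
* leader_idx = 0, nr_members = 2.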
*/
static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
struct perf_evlist *evlist)
{
u32 nr_groups = evlist->nr_groups;
struct perf_evsel *evsel;
int ret;
ret = do_write(fd, &nr_groups, sizeof(nr_groups));
if (ret < 0)
return ret;
list_for_each_entry(evsel, &evlist->entries, node) {
if (perf_evsel__is_group_leader(evsel) &&
evsel->nr_members > 1) {
const char *name = evsel->group_name ?: "{anon_group}";
u32 leader_idx = evsel->idx;
u32 nr_members = evsel->nr_members;
ret = do_write_string(fd, name);
if (ret < 0)
return ret;
ret = do_write(fd, &leader_idx, sizeof(leader_idx));
if (ret < 0)
return ret;
ret = do_write(fd, &nr_members, sizeof(nr_members));
if (ret < 0)
return ret;
}
}
return 0;
}
/*
* default get_cpuid(): nothing gets recorded
* actual implementation must be in arch/$(ARCH)/util/header.c
@@ -1447,6 +1493,31 @@ error:
fprintf(fp, "# pmu mappings: unable to read\n");
}
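/* Prints one "# group: <name>{member1,member2,...}" line per recorded group */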
static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
FILE *fp)
{
struct perf_session *session;
struct perf_evsel *evsel;
u32 nr = 0;
session = container_of(ph, struct perf_session, header);
list_for_each_entry(evsel, &session->evlist->entries, node) {
if (perf_evsel__is_group_leader(evsel) &&
evsel->nr_members > 1) {
fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
perf_evsel__name(evsel));
nr = evsel->nr_members - 1;
} else if (nr) {
fprintf(fp, ",%s", perf_evsel__name(evsel));
if (--nr == 0)
fprintf(fp, "}\n");
}
}
}
static int __event_process_build_id(struct build_id_event *bev,
char *filename,
struct perf_session *session)
@@ -1961,6 +2032,98 @@ error:
return -1;
}
static int process_group_desc(struct perf_file_section *section __maybe_unused,
struct perf_header *ph, int fd,
void *data __maybe_unused)
{
int ret = -1;
u32 i, nr, nr_groups;
struct perf_session *session;
struct perf_evsel *evsel, *leader = NULL;
struct group_desc {
char *name;
u32 leader_idx;
u32 nr_members;
} *desc;
if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
return -1;
if (ph->needs_swap)
nr_groups = bswap_32(nr_groups);
ph->env.nr_groups = nr_groups;
if (!nr_groups) {
pr_debug("group desc not available\n");
return 0;
}
desc = calloc(nr_groups, sizeof(*desc));
if (!desc)
return -1;
for (i = 0; i < nr_groups; i++) {
desc[i].name = do_read_string(fd, ph);
if (!desc[i].name)
goto out_free;
if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
goto out_free;
if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
goto out_free;
if (ph->needs_swap) {
desc[i].leader_idx = bswap_32(desc[i].leader_idx);
desc[i].nr_members = bswap_32(desc[i].nr_members);
}
}
/*
* Rebuild group relationship based on the group_desc
*/
session = container_of(ph, struct perf_session, header);
session->evlist->nr_groups = nr_groups;
i = nr = 0;
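/* i walks the group descriptors; nr counts the remaining members of the current group */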
list_for_each_entry(evsel, &session->evlist->entries, node) {
if (evsel->idx == (int) desc[i].leader_idx) {
evsel->leader = evsel;
/* {anon_group} is a dummy name */
if (strcmp(desc[i].name, "{anon_group}")) {
	evsel->group_name = desc[i].name;
	/* ownership moves to the evsel; don't free it again at out_free */
	desc[i].name = NULL;
}
evsel->nr_members = desc[i].nr_members;
if (i >= nr_groups || nr > 0) {
pr_debug("invalid group desc\n");
goto out_free;
}
leader = evsel;
nr = evsel->nr_members - 1;
i++;
} else if (nr) {
/* This is a group member */
evsel->leader = leader;
nr--;
}
}
if (i != nr_groups || nr != 0) {
pr_debug("invalid group desc\n");
goto out_free;
}
ret = 0;
out_free:
while ((int) --i >= 0)
free(desc[i].name);
free(desc);
return ret;
}
struct feature_ops {
int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
void (*print)(struct perf_header *h, int fd, FILE *fp);
@@ -2000,6 +2163,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
FEAT_OPP(HEADER_GROUP_DESC, group_desc),
};
struct header_print_data {

tools/perf/util/header.h

@@ -29,6 +29,7 @@ enum {
HEADER_NUMA_TOPOLOGY,
HEADER_BRANCH_STACK,
HEADER_PMU_MAPPINGS,
HEADER_GROUP_DESC,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
@@ -79,6 +80,7 @@ struct perf_session_env {
char *numa_nodes;
int nr_pmu_mappings;
char *pmu_mappings;
int nr_groups;
};
struct perf_header {

tools/perf/util/hist.c

@@ -4,6 +4,7 @@
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -540,6 +541,62 @@ void hists__collapse_resort_threaded(struct hists *hists)
* reverse the map, sort on period.
*/
static int period_cmp(u64 period_a, u64 period_b)
{
if (period_a > period_b)
return 1;
if (period_a < period_b)
return -1;
return 0;
}
static int hist_entry__sort_on_period(struct hist_entry *a,
struct hist_entry *b)
{
int ret;
int i, nr_members;
struct perf_evsel *evsel;
struct hist_entry *pair;
u64 *periods_a, *periods_b;
ret = period_cmp(a->stat.period, b->stat.period);
if (ret || !symbol_conf.event_group)
return ret;
evsel = hists_to_evsel(a->hists);
nr_members = evsel->nr_members;
if (nr_members <= 1)
return ret;
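/* per-member period arrays, indexed by each event's position within the group */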
periods_a = zalloc(sizeof(*periods_a) * nr_members);
periods_b = zalloc(sizeof(*periods_b) * nr_members);
if (!periods_a || !periods_b)
goto out;
list_for_each_entry(pair, &a->pairs.head, pairs.node) {
evsel = hists_to_evsel(pair->hists);
periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
}
list_for_each_entry(pair, &b->pairs.head, pairs.node) {
evsel = hists_to_evsel(pair->hists);
periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
}
for (i = 1; i < nr_members; i++) {
ret = period_cmp(periods_a[i], periods_b[i]);
if (ret)
break;
}
out:
free(periods_a);
free(periods_b);
return ret;
}
static void __hists__insert_output_entry(struct rb_root *entries,
struct hist_entry *he,
u64 min_callchain_hits)
@@ -556,7 +613,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node);
-	if (he->stat.period > iter->stat.period)
+	if (hist_entry__sort_on_period(he, iter) > 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;

tools/perf/util/parse-events.c

@@ -884,6 +884,7 @@ int parse_events(struct perf_evlist *evlist, const char *str)
if (!ret) {
int entries = data.idx - evlist->nr_entries;
perf_evlist__splice_list_tail(evlist, &data.list, entries);
evlist->nr_groups += data.nr_groups;
return 0;
}

tools/perf/util/parse-events.h

@@ -64,6 +64,7 @@ struct parse_events_term {
struct parse_events_evlist {
struct list_head list;
int idx;
int nr_groups;
};
struct parse_events_terms {

tools/perf/util/parse-events.y

@ -23,6 +23,14 @@ do { \
YYABORT; \
} while (0)
static void inc_group_count(struct list_head *list,
			    struct parse_events_evlist *data)
{
	/* count only groups that have more than one member */
	if (!list_is_last(list->next, list))
		data->nr_groups++;
}
%}
%token PE_START_EVENTS PE_START_TERMS
@@ -123,6 +131,7 @@ PE_NAME '{' events '}'
{
struct list_head *list = $3;
inc_group_count(list, _data);
parse_events__set_leader($1, list);
$$ = list;
}
@@ -131,6 +140,7 @@ PE_NAME '{' events '}'
{
struct list_head *list = $2;
inc_group_count(list, _data);
parse_events__set_leader(NULL, list);
$$ = list;
}

tools/perf/util/symbol.h

@@ -96,7 +96,8 @@ struct symbol_conf {
initialized,
kptr_restrict,
annotate_asm_raw,
-	annotate_src;
+	annotate_src,
+	event_group;
const char *vmlinux_name,
*kallsyms_name,
*source_prefix,