#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);

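/*
 * Column width bookkeeping: each hists instance remembers the widest
 * string seen per output column (hists->col_len[]) so the stdio/TUI
 * browsers can align their output.
 */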
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

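/*
 * Grow the column widths to fit a single entry.  BITS_PER_LONG / 4 is
 * the number of hex digits needed to print an unresolved raw address on
 * this architecture.
 */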
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->iaddr.sym) {
			symlen = (int)h->mem_info->iaddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->srcline)
		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

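/*
 * Account an entry's period to the sys/user/guest bucket selected by the
 * cpumode bits that were recorded with the sample.
 */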
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

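/*
 * Decay is used by 'perf top': on every refresh the period and event
 * count shrink by a factor of 7/8, so entries that stop getting samples
 * gradually age out of the display.
 */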
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	rb_erase(&he->rb_node, &hists->entries);

	if (sort__need_collapse)
		rb_erase(&he->rb_node_in, &hists->entries_collapsed);
	else
		rb_erase(&he->rb_node_in, hists->entries_in);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}

void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}

/*
 * histogram, sorted on item, collects periods
 */

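/*
 * Allocate a new hist_entry from a template.  Resources the caller only
 * lends us (branch_info, raw_data) are deep-copied, and references are
 * taken on the maps and the thread so they outlive the sample.
 */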
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		map__get(he->ms.map);

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				map__zput(he->ms.map);
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			map__get(he->branch_info->from.map);
			map__get(he->branch_info->to.map);
		}

		if (he->mem_info) {
			map__get(he->mem_info->iaddr.map);
			map__get(he->mem_info->daddr.map);
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		if (he->raw_data) {
			he->raw_data = memdup(he->raw_data, he->raw_size);

			if (he->raw_data == NULL) {
				map__put(he->ms.map);
				if (he->branch_info) {
					map__put(he->branch_info->from.map);
					map__put(he->branch_info->to.map);
					free(he->branch_info);
				}
				if (he->mem_info) {
					map__put(he->mem_info->iaddr.map);
					map__put(he->mem_info->daddr.map);
				}
				free(he->stat_acc);
				free(he);
				return NULL;
			}
		}
		INIT_LIST_HEAD(&he->pairs.node);
		thread__get(he->thread);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

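/*
 * Look up 'entry' in the current input tree; on a match just accumulate
 * period/weight into the existing node, otherwise insert a new entry
 * allocated by hist_entry__new().
 */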
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      struct perf_sample *sample,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight = sample->weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};

	return hists__findnew_entry(hists, &entry, al, sample_self);
}

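/*
 * hist_entry_iter callbacks.  Each sample flavor (mem, branch, normal,
 * cumulative) supplies prepare/add_single/next/add_next/finish hooks,
 * which hist_entry_iter__add() below drives for every sample.
 */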
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * cumulated only one time to prevent entries more than 100%
	 * overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there's duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};

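/*
 * Drive one sample through the ops table selected in iter->ops.  A
 * caller would typically do something like the sketch below (field
 * values are illustrative, not taken from a specific tool):
 *
 *	struct hist_entry_iter iter = {
 *		.evsel	= evsel,
 *		.sample	= sample,
 *		.ops	= &hist_iter_normal,
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, max_stack, NULL);
 */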
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(iter->sample, &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err)
		return err;

	iter->max_stack = max_stack_depth;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

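/*
 * Release everything hist_entry__new() copied or took a reference on,
 * then the entry itself.
 */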
void hist_entry__delete(struct hist_entry *he)
{
	thread__zput(he->thread);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.map);
		map__zput(he->branch_info->to.map);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.map);
		map__zput(he->mem_info->daddr.map);
		zfree(&he->mem_info);
	}

	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	if (he->srcfile && he->srcfile[0])
		free(he->srcfile);
	free_callchain(he->callchain);
	free(he->trace_output);
	free(he->raw_data);
	free(he);
}

/*
 * collapse the histogram
 */

bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
				  struct rb_root *root, struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__delete(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

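/*
 * hists->entries_in_array holds two input trees.  Rotating between them
 * under hists->lock lets one thread keep inserting fresh samples while
 * another collapses the tree it just took, e.g. in 'perf top'.
 */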
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

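/*
 * Insert @he into the output rbtree ordered by hist_entry__sort(), pruning
 * its callchain below min_callchain_hits first when callchains are in use.
 */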
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

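/*
 * Rebuild the output tree from the input (or collapsed) tree: resort every
 * entry by the output sort keys, recompute the stats and column widths, and
 * tick @prog so long resorts stay responsive in the TUI.
 */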
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
	else
		use_callchain = symbol_conf.use_callchain;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}

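/*
 * Clear @filter's bit on @h; once no filter bits remain the entry becomes
 * visible again, so fold it and add it back into the non-filtered stats and
 * column width calculations.
 */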
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

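/*
 * The hists__filter_entry_by_*() helpers below set the corresponding
 * HIST_FILTER__* bit when an entry fails the active filter; the matching
 * hists__filter_by_*() walkers reset the non-filtered stats and re-admit
 * every entry that passes.
 */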
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}

void hists__filter_by_socket(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_socket(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
	}
}

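/*
 * nr_events[] is indexed by PERF_RECORD_* type, with slot 0 doubling as the
 * running total across all types.
 */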
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

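/*
 * Add a zero-stat placeholder for @pair into @hists' input (or collapsed)
 * tree, or return the existing equivalent entry if one is already there.
 */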
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

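/* Find the entry in @hists equivalent to @he, using the collapse keys. */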
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

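/*
 * A sketch of the typical pairing sequence for comparing two hists, as a
 * consumer such as 'perf diff' might do it (the variable names here are
 * illustrative only):
 *
 *	hists__match(leader, other);		// pair entries present in both
 *	if (hists__link(leader, other) < 0)	// add dummies for the rest
 *		return -1;
 *
 * Afterwards every entry in 'other' is reachable from 'leader', either via
 * its matched pair or via a zero-period dummy header.
 */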
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode)
{
	struct branch_info *bi;

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && bs->entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;
			}
			free(bi);
		}
	}
}

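/* Dump the per-evsel event stats for every event in @evlist to @fp. */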
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}

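/*
 * The 'hist.percentage' config / --percentage option: "relative" bases
 * percentages on the non-filtered total, "absolute" on the full period, as
 * selected below.
 */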
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}

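/*
 * Note the two input trees in entries_in_array: new entries can keep
 * flowing into one while a resort drains the other, with entries_in
 * flipped between them under hists->lock.
 */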
int __hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	hists->socket_filter = -1;
	return 0;
}

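/*
 * Teardown: entries may still sit in the input and collapsed trees if they
 * never made it to the output tree, so free those too.
 */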
static void hists__delete_remaining_entries(struct rb_root *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(root)) {
		node = rb_first(root);
		rb_erase(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	hists__delete_all_entries(hists);
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists);
	return 0;
}

/*
 * Register the hists-aware evsel subclass: every evsel gets a struct hists
 * set up at allocation time and its entries freed at destruction time via
 * the init/exit hooks above.
 */
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init,
					    hists_evsel__exit);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}