/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include "asm/bug.h"
#include "debugfs.h"
#include "event-parse.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "../../../include/linux/hw_breakpoint.h"
#include "../../include/linux/perf_event.h"
#include "perf_regs.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
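
/*
 * __perf_evsel__sample_size(): number of bytes of fixed-size sample data
 * implied by sample_type -- one u64 for each PERF_SAMPLE_* bit set within
 * PERF_SAMPLE_MASK (variable-length fields such as callchains or raw data
 * are not counted here).
 */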

static int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx  = idx;
	evsel->attr = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
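
/*
 * Read <tracing_events_path>/<sys>/<name>/format into a buffer grown in
 * BUFSIZ steps and hand it to pevent_parse_format() to build the
 * event_format used for tracepoint field parsing.
 */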

static struct event_format *event_format__new(const char *sys, const char *name)
{
	int fd, n;
	char *filename;
	void *bf = NULL, *nbf;
	size_t size = 0, alloc_size = 0;
	struct event_format *format = NULL;

	if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out_free_filename;

	do {
		if (size == alloc_size) {
			alloc_size += BUFSIZ;
			nbf = realloc(bf, alloc_size);
			if (nbf == NULL)
				goto out_free_bf;
			bf = nbf;
		}

		n = read(fd, bf + size, BUFSIZ);
		if (n < 0)
			goto out_free_bf;
		size += n;
	} while (n > 0);

	pevent_parse_format(&format, bf, size, sys);

out_free_bf:
	free(bf);
	close(fd);
out_free_filename:
	free(filename);
out:
	return format;
}

struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		evsel->tp_format = event_format__new(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
		evsel->name = evsel->tp_format->name;
	}

	return evsel;

out_free:
	free(evsel);
	return NULL;
}
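
/*
 * Static tables mapping attr.config values to canonical event names for the
 * generic attr.type classes; they are consumed by perf_evsel__name() below.
 */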

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}
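
/*
 * Append the event modifier suffix (a ':' followed by letters such as
 * u/k/h, the precise-level 'p's and H/G) implied by the attr exclude_* and
 * precise_ip bits; returns the number of characters written to bf.
 */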

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache", "l1-d", "l1d", "L1-data", },
 { "L1-icache", "l1-i", "l1i", "L1-instruction", },
 { "LLC", "L2", },
 { "dTLB", "d-tlb", "Data-TLB", },
 { "iTLB", "i-tlb", "Instruction-TLB", },
 { "branch", "branches", "bpu", "btb", "bpc", },
 { "node", },
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load", "loads", "read", },
 { "store", "stores", "write", },
 { "prefetch", "prefetches", "speculative-read", "speculative-load", },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs", "Reference", "ops", "access", },
 { "misses", "miss", },
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)] = (CACHE_READ),
	[C(BPU)]  = (CACHE_READ),
	[C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	/* The *_MAX values are table sizes, so valid indices are 0 .. MAX-1 */
	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}
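
/*
 * Fill in evsel->attr according to the record/top session options: sampling
 * period vs. frequency, which PERF_SAMPLE_* fields to record, callchain
 * collection (including DWARF user-stack dumps), branch stack sampling, and
 * whether the event is enabled on exec or left for the caller to enable.
 */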

void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
			struct perf_evsel *first)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->disabled = 1;
	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
			      PERF_FORMAT_TOTAL_TIME_RUNNING |
			      PERF_FORMAT_ID;

	attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a sample period of 1, but keep it a
	 * weak assumption that the user can override.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq	   = 1;
			attr->sample_freq  = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type |= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph) {
		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

		if (opts->call_graph == CALLCHAIN_DWARF) {
			attr->sample_type |= PERF_SAMPLE_REGS_USER |
					     PERF_SAMPLE_STACK_USER;
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (perf_target__has_cpu(&opts->target))
		attr->sample_type |= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type |= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type |= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type |= PERF_SAMPLE_TIME;
		attr->sample_type |= PERF_SAMPLE_RAW;
		attr->sample_type |= PERF_SAMPLE_CPU;
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	if (perf_target__none(&opts->target) &&
	    (!opts->group || evsel == first)) {
		attr->enable_on_exec = 1;
	}
}
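
/*
 * evsel->fd is an xyarray indexed by (cpu, thread); every slot starts out
 * as -1 and is later filled with the fd returned by sys_perf_event_open()
 * when the event is actually opened on that cpu/thread.
 */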

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->group_name);
	if (evsel->tp_format && evsel->name == evsel->tp_format->name) {
		evsel->name = NULL;
		pevent_free_format(evsel->tp_format);
	}
	free(evsel->name);
	free(evsel);
}
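
/*
 * Read one (cpu, thread) counter. When scaling is requested the three u64
 * read-format values (value, time_enabled, time_running) are used to
 * extrapolate a partially scheduled counter:
 * scaled value = value * time_enabled / time_running.
 */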

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
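
/*
 * Return the fd already opened on this (cpu, thread) for the event's group
 * leader, or -1 when the evsel has no leader set; the result is passed as
 * the group_fd argument to sys_perf_event_open() below.
 */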

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (!leader)
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}
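
/*
 * Open the event once per (cpu, thread) pair. With cgroup monitoring the
 * cgroup directory fd is passed in place of a pid, together with
 * PERF_FLAG_PID_CGROUP; on failure every fd opened so far is closed again.
 */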

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};
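
/*
 * The placeholder maps above are used when perf_evsel__open() is called
 * with a NULL cpu or thread map: each holds a single -1 entry, which
 * perf_event_open() interprets as "any cpu" for the cpu argument and
 * "all tasks" for the pid argument.
 */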

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}
|
2011-01-04 03:48:12 +08:00
|
|
|
|
2011-01-12 09:42:19 +08:00
|
|
|
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
|
2012-08-08 18:22:36 +08:00
|
|
|
struct thread_map *threads)
|
2011-01-04 21:55:27 +08:00
|
|
|
{
|
2012-08-08 18:22:36 +08:00
|
|
|
return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
|
2011-01-04 03:48:12 +08:00
|
|
|
}
|
2011-01-13 03:03:24 +08:00
|
|
|
|
2012-09-26 23:48:18 +08:00
|
|
|
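/*
 * For non-sample records with attr.sample_id_all set, the kernel appends
 * the selected id fields (TID, TIME, ID, STREAM_ID, CPU) at the tail of
 * the record, so parse them backwards from the last u64.
 */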
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
|
|
|
|
const union perf_event *event,
|
|
|
|
struct perf_sample *sample)
|
2011-01-21 23:46:41 +08:00
|
|
|
{
|
2012-09-26 23:48:18 +08:00
|
|
|
u64 type = evsel->attr.sample_type;
|
2011-01-21 23:46:41 +08:00
|
|
|
const u64 *array = event->sample.array;
|
2012-09-26 23:48:18 +08:00
|
|
|
bool swapped = evsel->needs_swap;
|
2012-05-30 20:23:44 +08:00
|
|
|
union u64_swap u;
|
2011-01-21 23:46:41 +08:00
|
|
|
|
|
|
|
array += ((event->header.size -
|
|
|
|
sizeof(event->header)) / sizeof(u64)) - 1;
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_CPU) {
|
2012-05-30 20:23:44 +08:00
|
|
|
u.val64 = *array;
|
|
|
|
if (swapped) {
|
|
|
|
/* undo swap of u64, then swap on individual u32s */
|
|
|
|
u.val64 = bswap_64(u.val64);
|
|
|
|
u.val32[0] = bswap_32(u.val32[0]);
|
|
|
|
}
|
|
|
|
|
|
|
|
sample->cpu = u.val32[0];
|
2011-01-21 23:46:41 +08:00
|
|
|
array--;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_STREAM_ID) {
|
|
|
|
sample->stream_id = *array;
|
|
|
|
array--;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_ID) {
|
|
|
|
sample->id = *array;
|
|
|
|
array--;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_TIME) {
|
|
|
|
sample->time = *array;
|
|
|
|
array--;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_TID) {
|
2012-05-30 20:23:44 +08:00
|
|
|
u.val64 = *array;
|
|
|
|
if (swapped) {
|
|
|
|
/* undo swap of u64, then swap on individual u32s */
|
|
|
|
u.val64 = bswap_64(u.val64);
|
|
|
|
u.val32[0] = bswap_32(u.val32[0]);
|
|
|
|
u.val32[1] = bswap_32(u.val32[1]);
|
|
|
|
}
|
|
|
|
|
|
|
|
sample->pid = u.val32[0];
|
|
|
|
sample->tid = u.val32[1];
|
2011-01-21 23:46:41 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-05-22 02:08:15 +08:00
|
|
|
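/*
 * Return true when a field of 'size' bytes starting at 'offset' would run
 * past the end of the event record, i.e. the record is truncated.
 */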
static bool sample_overlap(const union perf_event *event,
|
|
|
|
const void *offset, u64 size)
|
|
|
|
{
|
|
|
|
const void *base = event;
|
|
|
|
|
|
|
|
if (offset + size > base + event->header.size)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-08-02 23:23:46 +08:00
|
|
|
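/*
 * A PERF_RECORD_SAMPLE body is a sequence of u64 slots laid out in the
 * fixed order of the PERF_SAMPLE_* bits in sample_type, so each enabled
 * bit below consumes its slot(s) and advances 'array'.
 */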
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
|
2012-09-26 23:48:18 +08:00
|
|
|
struct perf_sample *data)
|
2011-01-21 23:46:41 +08:00
|
|
|
{
|
2012-08-02 23:23:46 +08:00
|
|
|
u64 type = evsel->attr.sample_type;
|
2012-08-07 21:20:45 +08:00
|
|
|
u64 regs_user = evsel->attr.sample_regs_user;
|
2012-09-26 23:48:18 +08:00
|
|
|
bool swapped = evsel->needs_swap;
|
2011-01-21 23:46:41 +08:00
|
|
|
const u64 *array;
|
|
|
|
|
2011-09-06 23:12:26 +08:00
|
|
|
/*
|
|
|
|
* used for cross-endian analysis. See git commit 65014ab3
|
|
|
|
* for why this goofiness is needed.
|
|
|
|
*/
|
2012-05-16 14:59:04 +08:00
|
|
|
union u64_swap u;
|
2011-09-06 23:12:26 +08:00
|
|
|
|
2011-12-16 00:32:39 +08:00
|
|
|
memset(data, 0, sizeof(*data));
|
2011-01-21 23:46:41 +08:00
|
|
|
data->cpu = data->pid = data->tid = -1;
|
|
|
|
data->stream_id = data->id = data->time = -1ULL;
|
2012-02-04 01:01:13 +08:00
|
|
|
data->period = 1;
|
2011-01-21 23:46:41 +08:00
|
|
|
|
|
|
|
if (event->header.type != PERF_RECORD_SAMPLE) {
|
2012-08-02 23:23:46 +08:00
|
|
|
if (!evsel->attr.sample_id_all)
|
2011-01-21 23:46:41 +08:00
|
|
|
return 0;
|
2012-09-26 23:48:18 +08:00
|
|
|
return perf_evsel__parse_id_sample(evsel, event, data);
|
2011-01-21 23:46:41 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
array = event->sample.array;
|
|
|
|
|
2012-08-02 23:23:46 +08:00
|
|
|
if (evsel->sample_size + sizeof(event->header) > event->header.size)
|
2011-05-22 01:33:04 +08:00
|
|
|
return -EFAULT;
|
|
|
|
|
2011-01-21 23:46:41 +08:00
|
|
|
if (type & PERF_SAMPLE_IP) {
|
|
|
|
data->ip = event->ip.ip;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_TID) {
|
2011-09-06 23:12:26 +08:00
|
|
|
u.val64 = *array;
|
|
|
|
if (swapped) {
|
|
|
|
/* undo swap of u64, then swap on individual u32s */
|
|
|
|
u.val64 = bswap_64(u.val64);
|
|
|
|
u.val32[0] = bswap_32(u.val32[0]);
|
|
|
|
u.val32[1] = bswap_32(u.val32[1]);
|
|
|
|
}
|
|
|
|
|
|
|
|
data->pid = u.val32[0];
|
|
|
|
data->tid = u.val32[1];
|
2011-01-21 23:46:41 +08:00
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_TIME) {
|
|
|
|
data->time = *array;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
2011-05-31 03:08:23 +08:00
|
|
|
data->addr = 0;
|
2011-01-21 23:46:41 +08:00
|
|
|
if (type & PERF_SAMPLE_ADDR) {
|
|
|
|
data->addr = *array;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
data->id = -1ULL;
|
|
|
|
if (type & PERF_SAMPLE_ID) {
|
|
|
|
data->id = *array;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_STREAM_ID) {
|
|
|
|
data->stream_id = *array;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_CPU) {
|
2011-09-06 23:12:26 +08:00
|
|
|
|
|
|
|
u.val64 = *array;
|
|
|
|
if (swapped) {
|
|
|
|
/* undo swap of u64, then swap on individual u32s */
|
|
|
|
u.val64 = bswap_64(u.val64);
|
|
|
|
u.val32[0] = bswap_32(u.val32[0]);
|
|
|
|
}
|
|
|
|
|
|
|
|
data->cpu = u.val32[0];
|
2011-01-21 23:46:41 +08:00
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_PERIOD) {
|
|
|
|
data->period = *array;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_READ) {
|
2012-01-25 22:20:40 +08:00
|
|
|
fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
|
2011-01-21 23:46:41 +08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
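	/*
	 * Callchain layout: a u64 nr followed by nr instruction pointers;
	 * validate both the count word and the entries against the record size.
	 */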
if (type & PERF_SAMPLE_CALLCHAIN) {
|
2011-05-22 02:08:15 +08:00
|
|
|
if (sample_overlap(event, array, sizeof(data->callchain->nr)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
2011-01-21 23:46:41 +08:00
|
|
|
data->callchain = (struct ip_callchain *)array;
|
2011-05-22 02:08:15 +08:00
|
|
|
|
|
|
|
if (sample_overlap(event, array, data->callchain->nr))
|
|
|
|
return -EFAULT;
|
|
|
|
|
2011-01-21 23:46:41 +08:00
|
|
|
array += 1 + data->callchain->nr;
|
|
|
|
}
|
|
|
|
|
|
|
|
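	/*
	 * Raw tracepoint data is preceded by a u32 size word; the payload
	 * itself starts right after that word.
	 */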
if (type & PERF_SAMPLE_RAW) {
|
2011-09-29 23:05:08 +08:00
|
|
|
const u64 *pdata;
|
|
|
|
|
2011-09-06 23:12:26 +08:00
|
|
|
u.val64 = *array;
|
|
|
|
if (WARN_ONCE(swapped,
|
|
|
|
"Endianness of raw data not corrected!\n")) {
|
|
|
|
/* undo swap of u64, then swap on individual u32s */
|
|
|
|
u.val64 = bswap_64(u.val64);
|
|
|
|
u.val32[0] = bswap_32(u.val32[0]);
|
|
|
|
u.val32[1] = bswap_32(u.val32[1]);
|
|
|
|
}
|
2011-05-22 02:08:15 +08:00
|
|
|
|
|
|
|
if (sample_overlap(event, array, sizeof(u32)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
2011-09-06 23:12:26 +08:00
|
|
|
data->raw_size = u.val32[0];
|
2011-09-29 23:05:08 +08:00
|
|
|
pdata = (void *) array + sizeof(u32);
|
2011-05-22 02:08:15 +08:00
|
|
|
|
2011-09-29 23:05:08 +08:00
|
|
|
if (sample_overlap(event, pdata, data->raw_size))
|
2011-05-22 02:08:15 +08:00
|
|
|
return -EFAULT;
|
|
|
|
|
2011-09-29 23:05:08 +08:00
|
|
|
data->raw_data = (void *) pdata;
|
2012-03-18 06:23:18 +08:00
|
|
|
|
|
|
|
array = (void *)array + data->raw_size + sizeof(u32);
|
2011-01-21 23:46:41 +08:00
|
|
|
}
|
|
|
|
|
2012-02-10 06:21:01 +08:00
|
|
|
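	/*
	 * Branch stack layout: a u64 nr followed by nr struct branch_entry
	 * records, each holding the from/to addresses and flags.
	 */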
if (type & PERF_SAMPLE_BRANCH_STACK) {
|
|
|
|
u64 sz;
|
|
|
|
|
|
|
|
data->branch_stack = (struct branch_stack *)array;
|
|
|
|
array++; /* nr */
|
|
|
|
|
|
|
|
sz = data->branch_stack->nr * sizeof(struct branch_entry);
|
|
|
|
sz /= sizeof(u64);
|
|
|
|
array += sz;
|
|
|
|
}
|
2012-08-07 21:20:45 +08:00
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_REGS_USER) {
|
|
|
|
/* First u64 tells us if we have any regs in sample. */
|
|
|
|
u64 avail = *array++;
|
|
|
|
|
|
|
|
if (avail) {
|
|
|
|
data->user_regs.regs = (u64 *)array;
|
|
|
|
array += hweight_long(regs_user);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
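	/*
	 * User stack dump layout: a u64 size, 'size' bytes of stack data,
	 * then a trailing u64 with the amount the kernel actually captured.
	 */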
if (type & PERF_SAMPLE_STACK_USER) {
|
|
|
|
u64 size = *array++;
|
|
|
|
|
|
|
|
data->user_stack.offset = ((char *)(array - 1)
|
|
|
|
- (char *) event);
|
|
|
|
|
|
|
|
if (!size) {
|
|
|
|
data->user_stack.size = 0;
|
|
|
|
} else {
|
|
|
|
data->user_stack.data = (char *)array;
|
|
|
|
array += size / sizeof(*array);
|
|
|
|
data->user_stack.size = *array;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-01-21 23:46:41 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2011-11-28 17:03:31 +08:00
|
|
|
|
|
|
|
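/*
 * Inverse of perf_evsel__parse_sample: write the perf_sample fields back
 * into event->sample.array in the same order. Only the fixed-size fields
 * up to PERF_SAMPLE_PERIOD are handled here.
 */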
int perf_event__synthesize_sample(union perf_event *event, u64 type,
|
|
|
|
const struct perf_sample *sample,
|
|
|
|
bool swapped)
|
|
|
|
{
|
|
|
|
u64 *array;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* used for cross-endian analysis. See git commit 65014ab3
|
|
|
|
* for why this goofiness is needed.
|
|
|
|
*/
|
2012-05-16 14:59:04 +08:00
|
|
|
union u64_swap u;
|
2011-11-28 17:03:31 +08:00
|
|
|
|
|
|
|
array = event->sample.array;
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_IP) {
|
|
|
|
event->ip.ip = sample->ip;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_TID) {
|
|
|
|
u.val32[0] = sample->pid;
|
|
|
|
u.val32[1] = sample->tid;
|
|
|
|
if (swapped) {
|
|
|
|
/*
|
2012-08-02 23:23:46 +08:00
|
|
|
* Inverse of what is done in perf_evsel__parse_sample
|
2011-11-28 17:03:31 +08:00
|
|
|
*/
|
|
|
|
u.val32[0] = bswap_32(u.val32[0]);
|
|
|
|
u.val32[1] = bswap_32(u.val32[1]);
|
|
|
|
u.val64 = bswap_64(u.val64);
|
|
|
|
}
|
|
|
|
|
|
|
|
*array = u.val64;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_TIME) {
|
|
|
|
*array = sample->time;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_ADDR) {
|
|
|
|
*array = sample->addr;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_ID) {
|
|
|
|
*array = sample->id;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_STREAM_ID) {
|
|
|
|
*array = sample->stream_id;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_CPU) {
|
|
|
|
u.val32[0] = sample->cpu;
|
|
|
|
if (swapped) {
|
|
|
|
/*
|
2012-08-02 23:23:46 +08:00
|
|
|
* Inverse of what is done in perf_evsel__parse_sample
|
2011-11-28 17:03:31 +08:00
|
|
|
*/
|
|
|
|
u.val32[0] = bswap_32(u.val32[0]);
|
|
|
|
u.val64 = bswap_64(u.val64);
|
|
|
|
}
|
|
|
|
*array = u.val64;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (type & PERF_SAMPLE_PERIOD) {
|
|
|
|
*array = sample->period;
|
|
|
|
array++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2012-09-12 06:24:23 +08:00
|
|
|
|
2012-09-18 22:21:50 +08:00
|
|
|
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
|
|
|
|
{
|
|
|
|
return pevent_find_field(evsel->tp_format, name);
|
|
|
|
}
|
|
|
|
|
2012-09-12 06:24:23 +08:00
|
|
|
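/*
 * Return a pointer into the raw tracepoint data for the named field. For
 * dynamic fields (e.g. __data_loc strings) the word at field->offset holds
 * the real data offset in its low 16 bits (and the length in the high 16).
 */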
char *perf_evsel__strval(struct perf_evsel *evsel, struct perf_sample *sample,
|
|
|
|
const char *name)
|
|
|
|
{
|
2012-09-18 22:21:50 +08:00
|
|
|
struct format_field *field = perf_evsel__field(evsel, name);
|
2012-09-12 06:24:23 +08:00
|
|
|
int offset;
|
|
|
|
|
2012-09-18 22:21:50 +08:00
|
|
|
if (!field)
|
|
|
|
return NULL;
|
2012-09-12 06:24:23 +08:00
|
|
|
|
|
|
|
offset = field->offset;
|
|
|
|
|
|
|
|
if (field->flags & FIELD_IS_DYNAMIC) {
|
|
|
|
offset = *(int *)(sample->raw_data + field->offset);
|
|
|
|
offset &= 0xffff;
|
|
|
|
}
|
|
|
|
|
|
|
|
return sample->raw_data + offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
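/*
 * Read the named tracepoint field as an integer; pevent_read_number honours
 * the field's size and the byte order of the recorded data. For example,
 * perf_evsel__intval(evsel, sample, "prev_pid") on a sched:sched_switch sample.
 */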
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
|
|
|
|
const char *name)
|
|
|
|
{
|
2012-09-18 22:21:50 +08:00
|
|
|
struct format_field *field = perf_evsel__field(evsel, name);
|
2012-09-12 06:24:23 +08:00
|
|
|
u64 val;
|
|
|
|
|
2012-09-18 22:21:50 +08:00
|
|
|
if (!field)
|
|
|
|
return 0;
|
2012-09-12 06:24:23 +08:00
|
|
|
|
|
|
|
val = pevent_read_number(evsel->tp_format->pevent,
|
|
|
|
sample->raw_data + field->offset, field->size);
|
|
|
|
return val;
|
|
|
|
|
|
|
|
}
|