#include "evlist.h"
|
|
|
|
#include "evsel.h"
|
|
|
|
#include "thread_map.h"
|
|
|
|
#include "cpumap.h"
|
|
|
|
#include "tests.h"
|
2015-09-07 16:38:06 +08:00
|
|
|
#include <linux/err.h>
|
2012-11-10 08:46:44 +08:00
|
|
|
|
|
|
|
/*
 * This test will generate a random number of calls to some getpid-like
 * syscalls (getsid, getppid, getpgid), then establish an mmap for a group
 * of events that are created to monitor those syscalls.
 *
 * It will receive the events via that mmap and use the PERF_SAMPLE_ID
 * generated sample.id field to map each sample back to its respective
 * perf_evsel instance.
 *
 * Then it checks that the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
int test__basic_mmap(int subtest __maybe_unused)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;
	char sbuf[STRERR_BUFSIZE];

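	/* Build a thread map with just the current process' main thread. */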
	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

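	/* A cpu map of all online CPUs; only the first one will be used. */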
	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

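	/*
	 * Pin this thread to the first CPU in the map so that the samples we
	 * generate all end up in the per-CPU buffer read back below.
	 */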
	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_free_cpus;
	}

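	/* Create the evlist and attach the cpu and thread maps to it. */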
	evlist = perf_evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

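	/*
	 * Create one tracepoint evsel per syscall (syscalls:sys_enter_*), add
	 * it to the evlist, open it and pick a random number of calls (1..127)
	 * to make to it.
	 */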
	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		evsels[i] = perf_evsel__newtp("syscalls", name);
		if (IS_ERR(evsels[i])) {
			pr_debug("perf_evsel__new\n");
			goto out_delete_evlist;
		}

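		/*
		 * Wake up the reader on every event and make sure each sample
		 * carries an id that maps back to its evsel.
		 */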
		evsels[i]->attr.wakeup_events = 1;
		perf_evsel__set_sample_id(evsels[i], false);

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror_r(errno, sbuf, sizeof(sbuf)));
			goto out_delete_evlist;
		}

		nr_events[i] = 0;
		expected_nr_events[i] = 1 + rand() % 127;
	}

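	/* mmap a 128-page ring buffer to receive the samples. */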
	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

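	/* Now make the expected number of calls to each of the syscalls. */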
	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}

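	/*
	 * Read the events back from the ring buffer, using sample.id to find
	 * the evsel that generated each one and counting samples per evsel.
	 */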
	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_delete_evlist;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_delete_evlist;
		}

		err = -1;
		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_delete_evlist;
		}
		nr_events[evsel->idx]++;
		perf_evlist__mmap_consume(evlist, 0);
	}

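	/* Each evsel must have seen exactly as many samples as calls made. */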
	err = 0;
	evlist__for_each_entry(evlist, evsel) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			err = -1;
			goto out_delete_evlist;
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
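	/*
	 * perf_evlist__delete() also releases the maps attached with
	 * perf_evlist__set_maps(), so clear the local pointers to avoid
	 * putting them again at the labels below.
	 */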
	cpus = NULL;
	threads = NULL;
out_free_cpus:
	cpu_map__put(cpus);
out_free_threads:
	thread_map__put(threads);
	return err;
}