libperf: Add threads to struct perf_evlist
Move threads from tools/perf's evlist to libperf's perf_evlist struct.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190721112506.12306-56-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 03617c22e3
parent f72f901d90
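The change is mechanical (every evlist->threads access becomes evlist->core.threads), but the resulting layout is easier to follow with the two structs side by side. A minimal, simplified sketch of what the patch leaves behind (fields not touched here are omitted, and the evlist_threads() helper is hypothetical, added only for illustration):

/*
 * Simplified view of the layout after this patch. The 'core' member
 * name comes from the accesses in the diff below; evlist_threads()
 * is a hypothetical illustration, not part of either API.
 */
struct perf_cpu_map;
struct perf_thread_map;

struct perf_evlist {				/* libperf, internal/evlist.h */
	struct perf_cpu_map	*cpus;
	struct perf_thread_map	*threads;	/* moved here by this patch */
};

struct evlist {					/* tools/perf, util/evlist.h */
	struct perf_evlist	core;		/* embedded libperf evlist */
	/* ...tools/perf-only members, minus the removed 'threads'... */
};

/* Every former evlist->threads access now goes through 'core': */
static inline struct perf_thread_map *evlist_threads(struct evlist *evlist)
{
	return evlist->core.threads;
}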
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -156,9 +156,9 @@ static int set_tracing_pid(struct perf_ftrace *ftrace)
 	if (target__has_cpu(&ftrace->target))
 		return 0;
 
-	for (i = 0; i < thread_map__nr(ftrace->evlist->threads); i++) {
+	for (i = 0; i < thread_map__nr(ftrace->evlist->core.threads); i++) {
 		scnprintf(buf, sizeof(buf), "%d",
-			  ftrace->evlist->threads->map[i]);
+			  ftrace->evlist->core.threads->map[i]);
 		if (append_tracing_file("set_ftrace_pid", buf) < 0)
 			return -1;
 	}
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1450,7 +1450,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
 	perf_session__set_id_hdr_size(kvm->session);
 	ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
 	machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
-				    kvm->evlist->threads, false, 1);
+				    kvm->evlist->core.threads, false, 1);
 	err = kvm_live_open_events(kvm);
 	if (err)
 		goto out;
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1275,7 +1275,7 @@ static int record__synthesize(struct record *rec, bool tail)
 	if (err)
 		goto out;
 
-	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
+	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
 					 process_synthesized_event,
 					NULL);
 	if (err < 0) {
@@ -1295,7 +1295,7 @@ static int record__synthesize(struct record *rec, bool tail)
 	if (err < 0)
 		pr_warning("Couldn't synthesize bpf events.\n");
 
-	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
+	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
 					    process_synthesized_event, opts->sample_address,
 					    1);
 out:
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -263,7 +263,7 @@ static int read_single_counter(struct evsel *counter, int cpu,
  */
 static int read_counter(struct evsel *counter, struct timespec *rs)
 {
-	int nthreads = thread_map__nr(evsel_list->threads);
+	int nthreads = thread_map__nr(evsel_list->core.threads);
 	int ncpus, cpu, thread;
 
 	if (target__has_cpu(&target) && !target__has_per_thread(&target))
@@ -485,15 +485,15 @@ try_again:
 				ui__warning("%s\n", msg);
 				goto try_again;
 			} else if (target__has_per_thread(&target) &&
-				   evsel_list->threads &&
-				   evsel_list->threads->err_thread != -1) {
+				   evsel_list->core.threads &&
+				   evsel_list->core.threads->err_thread != -1) {
 				/*
 				 * For global --per-thread case, skip current
 				 * error thread.
 				 */
-				if (!thread_map__remove(evsel_list->threads,
-							evsel_list->threads->err_thread)) {
-					evsel_list->threads->err_thread = -1;
+				if (!thread_map__remove(evsel_list->core.threads,
+							evsel_list->core.threads->err_thread)) {
+					evsel_list->core.threads->err_thread = -1;
 					goto try_again;
 				}
 			}
@@ -579,7 +579,7 @@ try_again:
 	enable_counters();
 	while (!done) {
 		nanosleep(&ts, NULL);
-		if (!is_target_alive(&target, evsel_list->threads))
+		if (!is_target_alive(&target, evsel_list->core.threads))
 			break;
 		if (timeout)
 			break;
@@ -1889,10 +1889,10 @@ int cmd_stat(int argc, const char **argv)
 	 * so we could print it out on output.
 	 */
 	if (stat_config.aggr_mode == AGGR_THREAD) {
-		thread_map__read_comms(evsel_list->threads);
+		thread_map__read_comms(evsel_list->core.threads);
 		if (target.system_wide) {
 			if (runtime_stat_new(&stat_config,
-				thread_map__nr(evsel_list->threads))) {
+				thread_map__nr(evsel_list->core.threads))) {
 				goto out;
 			}
 		}
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -990,7 +990,7 @@ static int perf_top__start_counters(struct perf_top *top)
 	evlist__for_each_entry(evlist, counter) {
try_again:
 		if (evsel__open(counter, top->evlist->core.cpus,
-				top->evlist->threads) < 0) {
+				top->evlist->core.threads) < 0) {
 
 			/*
 			 * Specially handle overwrite fall back.
@@ -1222,7 +1222,7 @@ static int __cmd_top(struct perf_top *top)
 		pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");
 
 	machine__synthesize_threads(&top->session->machines.host, &opts->target,
-				    top->evlist->threads, false,
+				    top->evlist->core.threads, false,
 				    top->nr_threads_synthesize);
 
 	if (top->nr_threads_synthesize > 1)
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1404,7 +1404,7 @@ static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
 		goto out;
 
 	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
-					    evlist->threads, trace__tool_process, false,
+					    evlist->core.threads, trace__tool_process, false,
 					    1);
 out:
 	if (err)
@@ -3183,7 +3183,7 @@ static int trace__set_filter_pids(struct trace *trace)
 			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
 						       trace->filter_pids.entries);
 		}
-	} else if (thread_map__pid(trace->evlist->threads, 0) == -1) {
+	} else if (thread_map__pid(trace->evlist->core.threads, 0) == -1) {
 		err = trace__set_filter_loop_pids(trace);
 	}
 
@@ -3412,8 +3412,8 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 		evlist__enable(evlist);
 	}
 
-	trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
-				  evlist->threads->nr > 1 ||
+	trace->multiple_threads = thread_map__pid(evlist->core.threads, 0) == -1 ||
+				  evlist->core.threads->nr > 1 ||
 				  perf_evlist__first(evlist)->core.attr.inherit;
 
 	/*
--- a/tools/perf/lib/include/internal/evlist.h
+++ b/tools/perf/lib/include/internal/evlist.h
@@ -3,12 +3,14 @@
 #define __LIBPERF_INTERNAL_EVLIST_H
 
 struct perf_cpu_map;
+struct perf_thread_map;
 
 struct perf_evlist {
 	struct list_head	 entries;
 	int			 nr_entries;
 	bool			 has_user_cpus;
 	struct perf_cpu_map	*cpus;
+	struct perf_thread_map	*threads;
 };
 
 #endif /* __LIBPERF_INTERNAL_EVLIST_H */
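With the member added above, a thread map built through libperf's public threadmap API can be hung off a perf_evlist; the test update later in this patch uses the same API via perf_thread_map__set_pid(). A small usage sketch, with one_thread_map() as a hypothetical helper and the pid handling purely illustrative:

#include <sys/types.h>
#include <perf/threadmap.h>	/* libperf public threadmap API */

/* Build a single-entry thread map pointing at one pid; sketch only. */
static struct perf_thread_map *one_thread_map(pid_t pid)
{
	struct perf_thread_map *threads = perf_thread_map__new_dummy();

	if (threads == NULL)
		return NULL;
	perf_thread_map__set_pid(threads, 0, pid);	/* slot 0 -> pid */
	return threads;	/* caller owns one reference; put it when done */
}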
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -58,7 +58,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
 
 	perf_evsel__config(evsel, &opts, NULL);
 
-	perf_thread_map__set_pid(evlist->threads, 0, getpid());
+	perf_thread_map__set_pid(evlist->core.threads, 0, getpid());
 
 	err = evlist__open(evlist);
 	if (err < 0) {
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -131,13 +131,13 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
 
 	if (per_cpu) {
 		mp->cpu = evlist->core.cpus->map[idx];
-		if (evlist->threads)
-			mp->tid = thread_map__pid(evlist->threads, 0);
+		if (evlist->core.threads)
+			mp->tid = thread_map__pid(evlist->core.threads, 0);
 		else
 			mp->tid = -1;
 	} else {
 		mp->cpu = -1;
-		mp->tid = thread_map__pid(evlist->threads, idx);
+		mp->tid = thread_map__pid(evlist->core.threads, idx);
 	}
 }
 
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -144,9 +144,9 @@ void evlist__delete(struct evlist *evlist)
 	perf_evlist__munmap(evlist);
 	evlist__close(evlist);
 	perf_cpu_map__put(evlist->core.cpus);
-	perf_thread_map__put(evlist->threads);
+	perf_thread_map__put(evlist->core.threads);
 	evlist->core.cpus = NULL;
-	evlist->threads = NULL;
+	evlist->core.threads = NULL;
 	perf_evlist__purge(evlist);
 	perf_evlist__exit(evlist);
 	free(evlist);
@@ -168,7 +168,7 @@ static void __perf_evlist__propagate_maps(struct evlist *evlist,
 	}
 
 	perf_thread_map__put(evsel->core.threads);
-	evsel->core.threads = perf_thread_map__get(evlist->threads);
+	evsel->core.threads = perf_thread_map__get(evlist->core.threads);
 }
 
 static void perf_evlist__propagate_maps(struct evlist *evlist)
@@ -342,7 +342,7 @@ static int perf_evlist__nr_threads(struct evlist *evlist,
 	if (evsel->system_wide)
 		return 1;
 	else
-		return thread_map__nr(evlist->threads);
+		return thread_map__nr(evlist->core.threads);
 }
 
 void evlist__disable(struct evlist *evlist)
@@ -425,7 +425,7 @@ int perf_evlist__enable_event_idx(struct evlist *evlist,
 int perf_evlist__alloc_pollfd(struct evlist *evlist)
 {
 	int nr_cpus = cpu_map__nr(evlist->core.cpus);
-	int nr_threads = thread_map__nr(evlist->threads);
+	int nr_threads = thread_map__nr(evlist->core.threads);
 	int nfds = 0;
 	struct evsel *evsel;
 
@@ -556,8 +556,8 @@ static void perf_evlist__set_sid_idx(struct evlist *evlist,
 		sid->cpu = evlist->core.cpus->map[cpu];
 	else
 		sid->cpu = -1;
-	if (!evsel->system_wide && evlist->threads && thread >= 0)
-		sid->tid = thread_map__pid(evlist->threads, thread);
+	if (!evsel->system_wide && evlist->core.threads && thread >= 0)
+		sid->tid = thread_map__pid(evlist->core.threads, thread);
 	else
 		sid->tid = -1;
 }
@@ -722,7 +722,7 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct evlist *evlist,
 
 	evlist->nr_mmaps = cpu_map__nr(evlist->core.cpus);
 	if (cpu_map__empty(evlist->core.cpus))
-		evlist->nr_mmaps = thread_map__nr(evlist->threads);
+		evlist->nr_mmaps = thread_map__nr(evlist->core.threads);
 	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
 	if (!map)
 		return NULL;
@@ -836,7 +836,7 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
 {
 	int cpu, thread;
 	int nr_cpus = cpu_map__nr(evlist->core.cpus);
-	int nr_threads = thread_map__nr(evlist->threads);
+	int nr_threads = thread_map__nr(evlist->core.threads);
 
 	pr_debug2("perf event ring buffer mmapped per cpu\n");
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
@@ -864,7 +864,7 @@ static int perf_evlist__mmap_per_thread(struct evlist *evlist,
 					struct mmap_params *mp)
 {
 	int thread;
-	int nr_threads = thread_map__nr(evlist->threads);
+	int nr_threads = thread_map__nr(evlist->core.threads);
 
 	pr_debug2("perf event ring buffer mmapped per thread\n");
 	for (thread = 0; thread < nr_threads; thread++) {
@@ -1015,7 +1015,7 @@ int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
 {
 	struct evsel *evsel;
 	const struct perf_cpu_map *cpus = evlist->core.cpus;
-	const struct perf_thread_map *threads = evlist->threads;
+	const struct perf_thread_map *threads = evlist->core.threads;
 	/*
 	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
 	 * Its value is decided by evsel's write_backward.
@@ -1121,9 +1121,9 @@ void perf_evlist__set_maps(struct evlist *evlist, struct perf_cpu_map *cpus,
 		evlist->core.cpus = perf_cpu_map__get(cpus);
 	}
 
-	if (threads != evlist->threads) {
-		perf_thread_map__put(evlist->threads);
-		evlist->threads = perf_thread_map__get(threads);
+	if (threads != evlist->core.threads) {
+		perf_thread_map__put(evlist->core.threads);
+		evlist->core.threads = perf_thread_map__get(threads);
 	}
 
 	perf_evlist__propagate_maps(evlist);
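The perf_evlist__set_maps() hunk above carries the ownership rule that makes the move safe: the evlist always holds its own reference on the thread map. The same get/put discipline, extracted into swap_thread_map(), a hypothetical helper that is not part of either API:

#include <perf/threadmap.h>

/* Replace the thread map held in *slot while keeping refcounts
 * balanced: drop the old reference, take a new one on the
 * incoming map. Both __put() and __get() accept NULL. */
static void swap_thread_map(struct perf_thread_map **slot,
			    struct perf_thread_map *threads)
{
	if (threads == *slot)
		return;
	perf_thread_map__put(*slot);
	*slot = perf_thread_map__get(threads);
}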
@@ -1398,7 +1398,7 @@ int evlist__open(struct evlist *evlist)
 	 * Default: one fd per CPU, all threads, aka systemwide
 	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
 	 */
-	if (evlist->threads == NULL && evlist->core.cpus == NULL) {
+	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
 		err = perf_evlist__create_syswide_maps(evlist);
 		if (err < 0)
 			goto out_err;
@@ -1501,12 +1501,12 @@ int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
 	}
 
 	if (target__none(target)) {
-		if (evlist->threads == NULL) {
+		if (evlist->core.threads == NULL) {
 			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
 				__func__, __LINE__);
 			goto out_close_pipes;
 		}
-		perf_thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
+		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
 	}
 
 	close(child_ready_pipe[1]);
@@ -1921,7 +1921,7 @@ int perf_evlist__start_sb_thread(struct evlist *evlist,
 
 	evlist__for_each_entry(evlist, counter) {
 		if (evsel__open(counter, evlist->core.cpus,
-				evlist->threads) < 0)
+				evlist->core.threads) < 0)
 			goto out_delete_evlist;
 	}
 
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -43,7 +43,6 @@ struct evlist {
 	struct fdarray	 pollfd;
 	struct perf_mmap *mmap;
 	struct perf_mmap *overwrite_mmap;
-	struct perf_thread_map *threads;
 	struct evsel	*selected;
 	struct events_stats	 stats;
 	struct perf_env	*env;
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -507,7 +507,7 @@ int perf_stat_synthesize_config(struct perf_stat_config *config,
 	err = perf_event__synthesize_extra_attr(tool, evlist, process,
 						attrs);
 
-	err = perf_event__synthesize_thread_map2(tool, evlist->threads,
+	err = perf_event__synthesize_thread_map2(tool, evlist->core.threads,
 						 process, NULL);
 	if (err < 0) {
 		pr_err("Couldn't synthesize thread map.\n");