diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index d152ab04a209..88f7b4241153 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -4198,11 +4198,7 @@ int cmd_record(int argc, const char **argv)
 	/* Enable ignoring missing threads when -u/-p option is defined. */
 	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
 
-	if (evlist__fix_hybrid_cpus(rec->evlist, rec->opts.target.cpu_list)) {
-		pr_err("failed to use cpu list %s\n",
-		       rec->opts.target.cpu_list);
-		goto out;
-	}
+	evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list);
 
 	if (callchain_param.enabled && callchain_param.record_mode == CALLCHAIN_FP)
 		arch__add_leaf_frame_record_opts(&rec->opts);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 8d4c4f4ca8ea..84d304cffd2c 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -2725,10 +2725,7 @@ int cmd_stat(int argc, const char **argv)
 		}
 	}
 
-	if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) {
-		pr_err("failed to use cpu list %s\n", target.cpu_list);
-		goto out;
-	}
+	evlist__warn_user_requested_cpus(evsel_list, target.cpu_list);
 
 	if (evlist__create_maps(evsel_list, &target) < 0) {
 		if (target__has_task(&target)) {
diff --git a/tools/perf/util/evlist-hybrid.c b/tools/perf/util/evlist-hybrid.c
index 57f02beef023..db3f5fbdebe1 100644
--- a/tools/perf/util/evlist-hybrid.c
+++ b/tools/perf/util/evlist-hybrid.c
@@ -86,77 +86,3 @@ bool evlist__has_hybrid(struct evlist *evlist)
 
 	return false;
 }
-
-int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
-{
-	struct perf_cpu_map *cpus;
-	struct evsel *evsel, *tmp;
-	struct perf_pmu *pmu;
-	int ret, unmatched_count = 0, events_nr = 0;
-
-	if (!perf_pmu__has_hybrid() || !cpu_list)
-		return 0;
-
-	cpus = perf_cpu_map__new(cpu_list);
-	if (!cpus)
-		return -1;
-
-	/*
-	 * The evsels are created with hybrid pmu's cpus. But now we
-	 * need to check and adjust the cpus of evsel by cpu_list because
-	 * cpu_list may cause conflicts with cpus of evsel. For example,
-	 * cpus of evsel is cpu0-7, but the cpu_list is cpu6-8, we need
-	 * to adjust the cpus of evsel to cpu6-7. And then propatate maps
-	 * in evlist__create_maps().
-	 */
-	evlist__for_each_entry_safe(evlist, tmp, evsel) {
-		struct perf_cpu_map *matched_cpus, *unmatched_cpus;
-		char buf1[128], buf2[128];
-
-		pmu = perf_pmu__find_hybrid_pmu(evsel->pmu_name);
-		if (!pmu)
-			continue;
-
-		ret = perf_pmu__cpus_match(pmu, cpus, &matched_cpus,
-					   &unmatched_cpus);
-		if (ret)
-			goto out;
-
-		events_nr++;
-
-		if (perf_cpu_map__nr(matched_cpus) > 0 &&
-		    (perf_cpu_map__nr(unmatched_cpus) > 0 ||
-		     perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(cpus) ||
-		     perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(pmu->cpus))) {
-			perf_cpu_map__put(evsel->core.cpus);
-			perf_cpu_map__put(evsel->core.own_cpus);
-			evsel->core.cpus = perf_cpu_map__get(matched_cpus);
-			evsel->core.own_cpus = perf_cpu_map__get(matched_cpus);
-
-			if (perf_cpu_map__nr(unmatched_cpus) > 0) {
-				cpu_map__snprint(matched_cpus, buf1, sizeof(buf1));
-				pr_warning("WARNING: use %s in '%s' for '%s', skip other cpus in list.\n",
-					   buf1, pmu->name, evsel->name);
-			}
-		}
-
-		if (perf_cpu_map__nr(matched_cpus) == 0) {
-			evlist__remove(evlist, evsel);
-			evsel__delete(evsel);
-
-			cpu_map__snprint(cpus, buf1, sizeof(buf1));
-			cpu_map__snprint(pmu->cpus, buf2, sizeof(buf2));
-			pr_warning("WARNING: %s isn't a '%s', please use a CPU list in the '%s' range (%s)\n",
-				   buf1, pmu->name, pmu->name, buf2);
-			unmatched_count++;
-		}
-
-		perf_cpu_map__put(matched_cpus);
-		perf_cpu_map__put(unmatched_cpus);
-	}
-	if (events_nr)
-		ret = (unmatched_count == events_nr) ? -1 : 0;
-out:
-	perf_cpu_map__put(cpus);
-	return ret;
-}
diff --git a/tools/perf/util/evlist-hybrid.h b/tools/perf/util/evlist-hybrid.h
index aacdb1b0f948..19f74b4c340a 100644
--- a/tools/perf/util/evlist-hybrid.h
+++ b/tools/perf/util/evlist-hybrid.h
@@ -10,6 +10,5 @@
 int evlist__add_default_hybrid(struct evlist *evlist, bool precise);
 void evlist__warn_hybrid_group(struct evlist *evlist);
 bool evlist__has_hybrid(struct evlist *evlist);
-int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list);
 
 #endif /* __PERF_EVLIST_HYBRID_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 2e2c3509bec3..9dfa977193b3 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -2465,3 +2465,42 @@ void evlist__check_mem_load_aux(struct evlist *evlist)
 		}
 	}
 }
+
+/**
+ * evlist__warn_user_requested_cpus() - Check each evsel against requested CPUs
+ * and warn if the user CPU list is inapplicable for the event's PMU's
+ * CPUs. Not core PMUs list a CPU in sysfs, but this may be overwritten by a
+ * user requested CPU and so any online CPU is applicable. Core PMUs handle
+ * events on the CPUs in their list and otherwise the event isn't supported.
+ * @evlist: The list of events being checked.
+ * @cpu_list: The user provided list of CPUs.
+ */
+void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list)
+{
+	struct perf_cpu_map *user_requested_cpus;
+	struct evsel *pos;
+
+	if (!cpu_list)
+		return;
+
+	user_requested_cpus = perf_cpu_map__new(cpu_list);
+	if (!user_requested_cpus)
+		return;
+
+	evlist__for_each_entry(evlist, pos) {
+		struct perf_cpu_map *intersect, *to_test;
+		const struct perf_pmu *pmu = evsel__find_pmu(pos);
+
+		to_test = pmu && pmu->is_core ? pmu->cpus : cpu_map__online();
+		intersect = perf_cpu_map__intersect(to_test, user_requested_cpus);
+		if (!perf_cpu_map__equal(intersect, user_requested_cpus)) {
+			char buf[128];
+
+			cpu_map__snprint(to_test, buf, sizeof(buf));
+			pr_warning("WARNING: A requested CPU in '%s' is not supported by PMU '%s' (CPUs %s) for event '%s'\n",
+				   cpu_list, pmu ? pmu->name : "cpu", buf, evsel__name(pos));
+		}
+		perf_cpu_map__put(intersect);
+	}
+	perf_cpu_map__put(user_requested_cpus);
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index e7e5540cc970..5e7ff44f3043 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -447,4 +447,6 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
 int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
 void evlist__check_mem_load_aux(struct evlist *evlist);
 
+void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list);
+
 #endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index d992f5242d99..cd94abe7a87a 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -2046,39 +2046,6 @@ int perf_pmu__match(char *pattern, char *name, char *tok)
 	return 0;
 }
 
-int perf_pmu__cpus_match(struct perf_pmu *pmu, struct perf_cpu_map *cpus,
-			 struct perf_cpu_map **mcpus_ptr,
-			 struct perf_cpu_map **ucpus_ptr)
-{
-	struct perf_cpu_map *pmu_cpus = pmu->cpus;
-	struct perf_cpu_map *matched_cpus, *unmatched_cpus;
-	struct perf_cpu cpu;
-	int i, matched_nr = 0, unmatched_nr = 0;
-
-	matched_cpus = perf_cpu_map__default_new();
-	if (!matched_cpus)
-		return -1;
-
-	unmatched_cpus = perf_cpu_map__default_new();
-	if (!unmatched_cpus) {
-		perf_cpu_map__put(matched_cpus);
-		return -1;
-	}
-
-	perf_cpu_map__for_each_cpu(cpu, i, cpus) {
-		if (!perf_cpu_map__has(pmu_cpus, cpu))
-			RC_CHK_ACCESS(unmatched_cpus)->map[unmatched_nr++] = cpu;
-		else
-			RC_CHK_ACCESS(matched_cpus)->map[matched_nr++] = cpu;
-	}
-
-	perf_cpu_map__set_nr(unmatched_cpus, unmatched_nr);
-	perf_cpu_map__set_nr(matched_cpus, matched_nr);
-	*mcpus_ptr = matched_cpus;
-	*ucpus_ptr = unmatched_cpus;
-	return 0;
-}
-
 double __weak perf_pmu__cpu_slots_per_cycle(void)
 {
 	return NAN;
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 96236a79c6fd..af10d137e2b5 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -265,10 +265,6 @@ void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu);
 bool perf_pmu__has_hybrid(void);
 int perf_pmu__match(char *pattern, char *name, char *tok);
 
-int perf_pmu__cpus_match(struct perf_pmu *pmu, struct perf_cpu_map *cpus,
-			 struct perf_cpu_map **mcpus_ptr,
-			 struct perf_cpu_map **ucpus_ptr);
-
 char *pmu_find_real_name(const char *name);
 char *pmu_find_alias_name(const char *name);
 double perf_pmu__cpu_slots_per_cycle(void);