OpenCloudOS-Kernel/tools/perf/util/cputopo.c

// SPDX-License-Identifier: GPL-2.0
#include <sys/param.h>
#include <sys/utsname.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <api/fs/fs.h>
#include <linux/zalloc.h>
#include <perf/cpumap.h>
#include "cputopo.h"
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "pmu.h"
#include "pmus.h"
#define PACKAGE_CPUS_FMT \
"%s/devices/system/cpu/cpu%d/topology/package_cpus_list"
#define PACKAGE_CPUS_FMT_OLD \
"%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define DIE_CPUS_FMT \
"%s/devices/system/cpu/cpu%d/topology/die_cpus_list"
#define CORE_CPUS_FMT \
"%s/devices/system/cpu/cpu%d/topology/core_cpus_list"
#define CORE_CPUS_FMT_OLD \
"%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
#define NODE_ONLINE_FMT \
"%s/devices/system/node/online"
#define NODE_MEMINFO_FMT \
"%s/devices/system/node/node%d/meminfo"
#define NODE_CPULIST_FMT \
"%s/devices/system/node/node%d/cpulist"
static int build_cpu_topology(struct cpu_topology *tp, int cpu)
{
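	/*
	 * Read the package, die and core CPU list strings for "cpu" from
	 * sysfs and record each string in the matching array in "tp" if it
	 * is not already present. The older core_siblings_list and
	 * thread_siblings_list names are used as fallbacks when the newer
	 * files are missing.
	 */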
FILE *fp;
char filename[MAXPATHLEN];
char *buf = NULL, *p;
size_t len = 0;
ssize_t sret;
u32 i = 0;
int ret = -1;
scnprintf(filename, MAXPATHLEN, PACKAGE_CPUS_FMT,
sysfs__mountpoint(), cpu);
if (access(filename, F_OK) == -1) {
scnprintf(filename, MAXPATHLEN, PACKAGE_CPUS_FMT_OLD,
sysfs__mountpoint(), cpu);
}
fp = fopen(filename, "r");
if (!fp)
goto try_dies;
sret = getline(&buf, &len, fp);
fclose(fp);
if (sret <= 0)
goto try_dies;
p = strchr(buf, '\n');
if (p)
*p = '\0';
for (i = 0; i < tp->package_cpus_lists; i++) {
if (!strcmp(buf, tp->package_cpus_list[i]))
break;
}
if (i == tp->package_cpus_lists) {
tp->package_cpus_list[i] = buf;
tp->package_cpus_lists++;
buf = NULL;
len = 0;
}
ret = 0;
try_dies:
if (!tp->die_cpus_list)
goto try_threads;
scnprintf(filename, MAXPATHLEN, DIE_CPUS_FMT,
sysfs__mountpoint(), cpu);
fp = fopen(filename, "r");
if (!fp)
goto try_threads;
sret = getline(&buf, &len, fp);
fclose(fp);
if (sret <= 0)
goto try_threads;
p = strchr(buf, '\n');
if (p)
*p = '\0';
for (i = 0; i < tp->die_cpus_lists; i++) {
if (!strcmp(buf, tp->die_cpus_list[i]))
break;
}
if (i == tp->die_cpus_lists) {
tp->die_cpus_list[i] = buf;
tp->die_cpus_lists++;
buf = NULL;
len = 0;
}
ret = 0;
try_threads:
scnprintf(filename, MAXPATHLEN, CORE_CPUS_FMT,
sysfs__mountpoint(), cpu);
if (access(filename, F_OK) == -1) {
scnprintf(filename, MAXPATHLEN, CORE_CPUS_FMT_OLD,
sysfs__mountpoint(), cpu);
}
fp = fopen(filename, "r");
if (!fp)
goto done;
if (getline(&buf, &len, fp) <= 0)
goto done;
p = strchr(buf, '\n');
if (p)
*p = '\0';
for (i = 0; i < tp->core_cpus_lists; i++) {
if (!strcmp(buf, tp->core_cpus_list[i]))
break;
}
if (i == tp->core_cpus_lists) {
tp->core_cpus_list[i] = buf;
tp->core_cpus_lists++;
buf = NULL;
}
ret = 0;
done:
if (fp)
fclose(fp);
free(buf);
return ret;
}
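
/* Free a cpu_topology together with the CPU list strings it owns. */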
void cpu_topology__delete(struct cpu_topology *tp)
{
u32 i;
if (!tp)
return;
for (i = 0 ; i < tp->package_cpus_lists; i++)
zfree(&tp->package_cpus_list[i]);
for (i = 0 ; i < tp->die_cpus_lists; i++)
zfree(&tp->die_cpus_list[i]);
for (i = 0 ; i < tp->core_cpus_lists; i++)
zfree(&tp->core_cpus_list[i]);
free(tp);
}
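
/*
 * SMT is on if any core has more than one CPU in its core CPU list, which
 * shows up as a ',' or '-' separator in the sysfs list string.
 */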
bool cpu_topology__smt_on(const struct cpu_topology *topology)
{
for (u32 i = 0; i < topology->core_cpus_lists; i++) {
const char *cpu_list = topology->core_cpus_list[i];
/*
* If there is a need to separate siblings in a core then SMT is
* enabled.
*/
if (strchr(cpu_list, ',') || strchr(cpu_list, '-'))
return true;
}
return false;
}
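
/*
 * A session is "core wide" when events are counted on every SMT thread of the
 * cores involved: either no CPU list was requested (system wide), or for each
 * core all of its CPUs, or none of them, were requested.
 */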
bool cpu_topology__core_wide(const struct cpu_topology *topology,
const char *user_requested_cpu_list)
{
struct perf_cpu_map *user_requested_cpus;
/*
* If user_requested_cpu_list is empty then all CPUs are recorded and so
* core_wide is true.
*/
if (!user_requested_cpu_list)
return true;
user_requested_cpus = perf_cpu_map__new(user_requested_cpu_list);
/* Check that every user requested CPU is the complete set of SMT threads on a core. */
for (u32 i = 0; i < topology->core_cpus_lists; i++) {
const char *core_cpu_list = topology->core_cpus_list[i];
struct perf_cpu_map *core_cpus = perf_cpu_map__new(core_cpu_list);
struct perf_cpu cpu;
int idx;
bool has_first, first = true;
perf_cpu_map__for_each_cpu(cpu, idx, core_cpus) {
if (first) {
has_first = perf_cpu_map__has(user_requested_cpus, cpu);
first = false;
} else {
/*
* If the first core CPU is user requested then
* all subsequent CPUs in the core must be user
* requested too. If the first CPU isn't user
* requested then none of the others must be
* too.
*/
if (perf_cpu_map__has(user_requested_cpus, cpu) != has_first) {
perf_cpu_map__put(core_cpus);
perf_cpu_map__put(user_requested_cpus);
return false;
}
}
}
perf_cpu_map__put(core_cpus);
}
perf_cpu_map__put(user_requested_cpus);
return true;
}
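
/*
 * Die topology is only reported for x86_64 and s390x, and only when sysfs
 * exposes die_cpus_list for CPU 0.
 */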
static bool has_die_topology(void)
{
char filename[MAXPATHLEN];
struct utsname uts;
if (uname(&uts) < 0)
return false;
if (strncmp(uts.machine, "x86_64", 6) &&
strncmp(uts.machine, "s390x", 5))
return false;
scnprintf(filename, MAXPATHLEN, DIE_CPUS_FMT,
sysfs__mountpoint(), 0);
if (access(filename, F_OK) == -1)
return false;
return true;
}
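
/* Return the cached topology of online CPUs, computing it on first use. */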
const struct cpu_topology *online_topology(void)
{
static const struct cpu_topology *topology;
if (!topology) {
topology = cpu_topology__new();
if (!topology) {
pr_err("Error creating CPU topology");
abort();
}
}
return topology;
}
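
/*
 * Allocate a cpu_topology sized for the maximum present CPU and fill it by
 * reading sysfs for each online CPU. The die CPU lists are only allocated
 * when die topology is available.
 */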
struct cpu_topology *cpu_topology__new(void)
{
struct cpu_topology *tp = NULL;
void *addr;
u32 nr, i, nr_addr;
size_t sz;
long ncpus;
int ret = -1;
struct perf_cpu_map *map;
bool has_die = has_die_topology();
ncpus = cpu__max_present_cpu().cpu;
/* build online CPU map */
map = perf_cpu_map__new(NULL);
if (map == NULL) {
pr_debug("failed to get system cpumap\n");
return NULL;
}
nr = (u32)(ncpus & UINT_MAX);
sz = nr * sizeof(char *);
if (has_die)
nr_addr = 3;
else
nr_addr = 2;
addr = calloc(1, sizeof(*tp) + nr_addr * sz);
if (!addr)
goto out_free;
tp = addr;
addr += sizeof(*tp);
tp->package_cpus_list = addr;
addr += sz;
if (has_die) {
tp->die_cpus_list = addr;
addr += sz;
}
tp->core_cpus_list = addr;
for (i = 0; i < nr; i++) {
if (!perf_cpu_map__has(map, (struct perf_cpu){ .cpu = i }))
continue;
ret = build_cpu_topology(tp, i);
if (ret < 0)
break;
}
out_free:
perf_cpu_map__put(map);
if (ret) {
cpu_topology__delete(tp);
tp = NULL;
}
return tp;
}
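
/*
 * Fill "node" for NUMA node "nr" with the total and free memory reported by
 * the node's meminfo file and with its CPU list from sysfs.
 */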
static int load_numa_node(struct numa_topology_node *node, int nr)
{
char str[MAXPATHLEN];
char field[32];
char *buf = NULL, *p;
size_t len = 0;
int ret = -1;
FILE *fp;
u64 mem;
node->node = (u32) nr;
scnprintf(str, MAXPATHLEN, NODE_MEMINFO_FMT,
sysfs__mountpoint(), nr);
fp = fopen(str, "r");
if (!fp)
return -1;
while (getline(&buf, &len, fp) > 0) {
/* skip over invalid lines */
if (!strchr(buf, ':'))
continue;
if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
goto err;
if (!strcmp(field, "MemTotal:"))
node->mem_total = mem;
if (!strcmp(field, "MemFree:"))
node->mem_free = mem;
if (node->mem_total && node->mem_free)
break;
}
fclose(fp);
fp = NULL;
scnprintf(str, MAXPATHLEN, NODE_CPULIST_FMT,
sysfs__mountpoint(), nr);
fp = fopen(str, "r");
if (!fp)
return -1;
if (getline(&buf, &len, fp) <= 0)
goto err;
p = strchr(buf, '\n');
if (p)
*p = '\0';
node->cpus = buf;
fclose(fp);
return 0;
err:
free(buf);
if (fp)
fclose(fp);
return ret;
}
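
/*
 * Build the NUMA topology from the list of online nodes, loading the memory
 * and CPU information of each node.
 */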
struct numa_topology *numa_topology__new(void)
{
struct perf_cpu_map *node_map = NULL;
struct numa_topology *tp = NULL;
char path[MAXPATHLEN];
char *buf = NULL;
size_t len = 0;
u32 nr, i;
FILE *fp;
char *c;
scnprintf(path, MAXPATHLEN, NODE_ONLINE_FMT,
sysfs__mountpoint());
fp = fopen(path, "r");
if (!fp)
return NULL;
if (getline(&buf, &len, fp) <= 0)
goto out;
c = strchr(buf, '\n');
if (c)
*c = '\0';
node_map = perf_cpu_map__new(buf);
if (!node_map)
goto out;
nr = (u32) perf_cpu_map__nr(node_map);
tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
if (!tp)
goto out;
tp->nr = nr;
for (i = 0; i < nr; i++) {
if (load_numa_node(&tp->nodes[i], perf_cpu_map__cpu(node_map, i).cpu)) {
numa_topology__delete(tp);
tp = NULL;
break;
}
}
out:
free(buf);
fclose(fp);
perf_cpu_map__put(node_map);
return tp;
}
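
/* Free a numa_topology together with each node's CPU list string. */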
void numa_topology__delete(struct numa_topology *tp)
{
u32 i;
for (i = 0; i < tp->nr; i++)
zfree(&tp->nodes[i].cpus);
free(tp);
}
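
/*
 * Record the PMU's name and the CPU list exposed by its "cpus" sysfs file in
 * "node".
 */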
static int load_hybrid_node(struct hybrid_topology_node *node,
struct perf_pmu *pmu)
{
char *buf = NULL, *p;
FILE *fp;
size_t len = 0;
node->pmu_name = strdup(pmu->name);
if (!node->pmu_name)
return -1;
fp = perf_pmu__open_file(pmu, "cpus");
if (!fp)
goto err;
if (getline(&buf, &len, fp) <= 0) {
fclose(fp);
goto err;
}
p = strchr(buf, '\n');
if (p)
*p = '\0';
fclose(fp);
node->cpus = buf;
return 0;
err:
zfree(&node->pmu_name);
free(buf);
return -1;
}
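
/*
 * Build the hybrid topology with one node per core PMU. Returns NULL on
 * non-hybrid systems, where there is at most one core PMU.
 */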
struct hybrid_topology *hybrid_topology__new(void)
{
struct perf_pmu *pmu = NULL;
struct hybrid_topology *tp = NULL;
int nr = perf_pmus__num_core_pmus(), i = 0;
if (nr <= 1)
return NULL;
tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * nr);
if (!tp)
return NULL;
tp->nr = nr;
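	/* Only core PMUs contribute to the hybrid topology, so only they are scanned. */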
while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
if (load_hybrid_node(&tp->nodes[i], pmu)) {
hybrid_topology__delete(tp);
return NULL;
}
i++;
}
return tp;
}
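
/* Free a hybrid_topology together with each node's PMU name and CPU list. */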
void hybrid_topology__delete(struct hybrid_topology *tp)
{
u32 i;
for (i = 0; i < tp->nr; i++) {
zfree(&tp->nodes[i].pmu_name);
zfree(&tp->nodes[i].cpus);
}
free(tp);
}