perf topology: Avoid hybrid list for hybrid topology
Avoid perf_pmu__for_each_hybrid_pmu() in hybrid_topology__new() by scanning all PMUs and processing only the is_core ones. Add an early exit for non-hybrid systems.

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ali Saidi <alisaidi@amazon.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kang Minchul <tegongkang@gmail.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230527072210.2900565-19-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 1215795ceb
parent dd64647ecb
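For context, here is a minimal sketch of the scan-and-count pattern the commit message describes, assuming the tools/perf internal API visible in the diff below (perf_pmu__scan(), perf_pmu__has_hybrid() and the struct perf_pmu is_core flag); the helper name count_core_pmus() is hypothetical and not part of the patch:

	/*
	 * Hypothetical helper mirroring the counting pass added by this patch:
	 * walk every PMU with perf_pmu__scan() and count the core ones, with an
	 * early exit when the system is not hybrid.
	 */
	#include <linux/types.h>	/* u32 */
	#include "pmu.h"		/* struct perf_pmu, perf_pmu__scan(), perf_pmu__has_hybrid() */

	static u32 count_core_pmus(void)
	{
		struct perf_pmu *pmu = NULL;
		u32 nr = 0;

		/* Non-hybrid systems have no hybrid topology to build. */
		if (!perf_pmu__has_hybrid())
			return 0;

		/* Scan all PMUs and count only the core (is_core) ones. */
		while ((pmu = perf_pmu__scan(pmu)) != NULL) {
			if (pmu->is_core)
				nr++;
		}
		return nr;
	}

The same scan-and-filter loop is reused in the population pass below, which is what lets the patch drop the pmu-hybrid.h list iterator.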
@@ -12,7 +12,7 @@
 #include "cpumap.h"
 #include "debug.h"
 #include "env.h"
-#include "pmu-hybrid.h"
+#include "pmu.h"
 
 #define PACKAGE_CPUS_FMT \
 	"%s/devices/system/cpu/cpu%d/topology/package_cpus_list"
@@ -469,11 +469,17 @@ err:
 
 struct hybrid_topology *hybrid_topology__new(void)
 {
-	struct perf_pmu *pmu;
+	struct perf_pmu *pmu = NULL;
 	struct hybrid_topology *tp = NULL;
-	u32 nr, i = 0;
+	u32 nr = 0, i = 0;
 
-	nr = perf_pmu__hybrid_pmu_num();
-	if (!nr)
+	if (!perf_pmu__has_hybrid())
+		return NULL;
+
+	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+		if (pmu->is_core)
+			nr++;
+	}
+	if (nr == 0)
 		return NULL;
 
@@ -482,7 +488,10 @@ struct hybrid_topology *hybrid_topology__new(void)
 		return NULL;
 
 	tp->nr = nr;
-	perf_pmu__for_each_hybrid_pmu(pmu) {
+	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+		if (!pmu->is_core)
+			continue;
+
 		if (load_hybrid_node(&tp->nodes[i], pmu)) {
 			hybrid_topology__delete(tp);
 			return NULL;