OpenCloudOS-Kernel/tools/perf/util/affinity.c

// SPDX-License-Identifier: GPL-2.0
/* Manage affinity to optimize IPIs inside the kernel perf API. */
#define _GNU_SOURCE 1
#include <sched.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/zalloc.h>
#include "perf.h"
#include "cpumap.h"
#include "affinity.h"

static int get_cpu_set_size(void)
{
	int sz = cpu__max_cpu().cpu + 8 - 1;
	/*
	 * sched_getaffinity doesn't like masks smaller than the kernel.
	 * Hopefully that's big enough.
	 */
	if (sz < 4096)
		sz = 4096;
	return sz / 8;
}
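
/*
 * Sizing example (illustrative): with cpu__max_cpu().cpu == 2048,
 * get_cpu_set_size() starts from 2048 + 7 = 2055 bits, raises that to the
 * 4096-bit minimum and returns 4096 / 8 = 512 bytes; the callers below
 * multiply by 8 again wherever a bit count is needed.
 */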

int affinity__setup(struct affinity *a)
{
	int cpu_set_size = get_cpu_set_size();

	a->orig_cpus = bitmap_zalloc(cpu_set_size * 8);
	if (!a->orig_cpus)
		return -1;
	sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
	a->sched_cpus = bitmap_zalloc(cpu_set_size * 8);
	if (!a->sched_cpus) {
		zfree(&a->orig_cpus);
		return -1;
	}
	bitmap_zero((unsigned long *)a->sched_cpus, cpu_set_size);
	a->changed = false;
	return 0;
}

/*
 * perf_event_open does an IPI internally to the target CPU.
 * It is more efficient to change perf's affinity to the target
 * CPU and then set up all events on that CPU, so we amortize
 * CPU communication.
 */
void affinity__set(struct affinity *a, int cpu)
{
	int cpu_set_size = get_cpu_set_size();

	/*
	 * Return early:
	 * - if cpu is -1 (no specific CPU to bind to)
	 * - if cpu is out of bounds for the sched_cpus mask
	 */
	if (cpu == -1 || ((cpu >= (cpu_set_size * 8))))
		return;

	a->changed = true;
	__set_bit(cpu, a->sched_cpus);
	/*
	 * We ignore errors because affinity is just an optimization.
	 * This could happen for example with isolated CPUs or cpusets.
	 * In this case the IPIs inside the kernel's perf API still work.
	 */
	sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
	__clear_bit(cpu, a->sched_cpus);
}

static void __affinity__cleanup(struct affinity *a)
{
	int cpu_set_size = get_cpu_set_size();

	if (a->changed)
		sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
	zfree(&a->sched_cpus);
	zfree(&a->orig_cpus);
}

void affinity__cleanup(struct affinity *a)
{
	if (a != NULL)
		__affinity__cleanup(a);
}
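
/*
 * Typical usage sketch (illustrative only; "cpus", "idx" and the event-open
 * step are placeholders for the caller's own code, and
 * perf_cpu_map__for_each_cpu() is assumed to be the iterator from
 * tools/lib/perf):
 *
 *	struct affinity affinity;
 *	struct perf_cpu cpu;
 *	int idx;
 *
 *	if (affinity__setup(&affinity) < 0)
 *		return;
 *	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
 *		affinity__set(&affinity, cpu.cpu);
 *		... open the events for this CPU here ...
 *	}
 *	affinity__cleanup(&affinity);
 */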