// SPDX-License-Identifier: GPL-2.0
/* Manage affinity to optimize IPIs inside the kernel perf API. */
#define _GNU_SOURCE 1
#include <sched.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/zalloc.h>
#include "perf.h"
#include "cpumap.h"
#include "affinity.h"

static int get_cpu_set_size(void)
{
	/* sz is in bits, one per possible CPU; + 8 - 1 lets the divide below round up. */
	int sz = cpu__max_cpu().cpu + 8 - 1;
	/*
	 * sched_getaffinity() doesn't like masks smaller than the kernel's
	 * own cpumask. Hopefully 4096 bits is big enough.
	 */
	if (sz < 4096)
		sz = 4096;
	return sz / 8;
}
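/*
 * Worked example of the sizing above (illustrative numbers, not taken from
 * any particular system): with 512 possible CPUs, sz starts at
 * 512 + 8 - 1 = 519 bits, gets clamped to the 4096-bit minimum, and the
 * function returns 4096 / 8 = 512 bytes, which is what the
 * sched_getaffinity()/sched_setaffinity() calls below receive as their
 * cpusetsize argument.
 */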

int affinity__setup(struct affinity *a)
{
	int cpu_set_size = get_cpu_set_size();

	/* Remember the caller's affinity so affinity__cleanup() can restore it. */
	a->orig_cpus = bitmap_zalloc(cpu_set_size * 8);
	if (!a->orig_cpus)
		return -1;
	sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
	a->sched_cpus = bitmap_zalloc(cpu_set_size * 8);
	if (!a->sched_cpus) {
		zfree(&a->orig_cpus);
		return -1;
	}
	bitmap_zero((unsigned long *)a->sched_cpus, cpu_set_size);
	a->changed = false;
	return 0;
}

/*
 * perf_event_open() does an IPI internally to the target CPU.
 * It is more efficient to change perf's affinity to the target
 * CPU and then set up all events on that CPU, so we amortize
 * CPU communication.
 */
void affinity__set(struct affinity *a, int cpu)
{
	int cpu_set_size = get_cpu_set_size();

	/*
	 * Return early:
	 * - if cpu is -1
	 * - if cpu would index sched_cpus out of bounds
	 */
	if (cpu == -1 || cpu >= cpu_set_size * 8)
		return;

	a->changed = true;
	__set_bit(cpu, a->sched_cpus);
	/*
	 * We ignore errors because affinity is just an optimization.
	 * This could happen for example with isolated CPUs or cpusets.
	 * In this case the IPIs inside the kernel's perf API still work.
	 */
	sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
	__clear_bit(cpu, a->sched_cpus);
}
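/*
 * Illustrative usage sketch of the API above (hypothetical caller code:
 * the ncpus/cpus variables and the open_events_on_cpu() helper are
 * placeholders for the caller's own per-CPU open loop, not part of perf):
 *
 *	struct affinity affinity;
 *
 *	if (affinity__setup(&affinity) < 0)
 *		return -1;
 *	for (int i = 0; i < ncpus; i++) {
 *		// Migrate to cpus[i] once, then open every event for it,
 *		// so each perf_event_open() avoids a cross-CPU IPI.
 *		affinity__set(&affinity, cpus[i]);
 *		open_events_on_cpu(cpus[i]);
 *	}
 *	affinity__cleanup(&affinity);	// restore the original mask
 */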

static void __affinity__cleanup(struct affinity *a)
{
	int cpu_set_size = get_cpu_set_size();

	/* Restore the original mask only if affinity__set() actually changed it. */
	if (a->changed)
		sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
	zfree(&a->sched_cpus);
	zfree(&a->orig_cpus);
}

void affinity__cleanup(struct affinity *a)
{
	if (a != NULL)
		__affinity__cleanup(a);
}