// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helper functions for handling target threads/cpus
 *
 * Copyright (C) 2012, LG Electronics, Namhyung Kim <namhyung.kim@lge.com>
 */

#include "target.h"

#include <pwd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/string.h>

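/*
 * Resolve conflicts between the ways a target can be specified (PID/TID,
 * UID, CPU list, BPF program, system-wide, per-thread).  When two
 * settings conflict, the lower-priority one is cleared; only the first
 * conflict found is reported via the returned target_errno.
 */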
enum target_errno target__validate(struct target *target)
{
	enum target_errno ret = TARGET_ERRNO__SUCCESS;

	if (target->pid)
		target->tid = target->pid;

	/* CPU and PID are mutually exclusive */
	if (target->tid && target->cpu_list) {
		target->cpu_list = NULL;
		if (ret == TARGET_ERRNO__SUCCESS)
			ret = TARGET_ERRNO__PID_OVERRIDE_CPU;
	}

	/* UID and PID are mutually exclusive */
	if (target->tid && target->uid_str) {
		target->uid_str = NULL;
		if (ret == TARGET_ERRNO__SUCCESS)
			ret = TARGET_ERRNO__PID_OVERRIDE_UID;
	}

	/* UID and CPU are mutually exclusive */
	if (target->uid_str && target->cpu_list) {
		target->cpu_list = NULL;
		if (ret == TARGET_ERRNO__SUCCESS)
			ret = TARGET_ERRNO__UID_OVERRIDE_CPU;
	}

	/* PID and SYSTEM are mutually exclusive */
	if (target->tid && target->system_wide) {
		target->system_wide = false;
		if (ret == TARGET_ERRNO__SUCCESS)
			ret = TARGET_ERRNO__PID_OVERRIDE_SYSTEM;
	}

	/* UID and SYSTEM are mutually exclusive */
	if (target->uid_str && target->system_wide) {
		target->system_wide = false;
		if (ret == TARGET_ERRNO__SUCCESS)
			ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM;
	}

	/* BPF and CPU are mutually exclusive */
	if (target->bpf_str && target->cpu_list) {
		target->cpu_list = NULL;
		if (ret == TARGET_ERRNO__SUCCESS)
			ret = TARGET_ERRNO__BPF_OVERRIDE_CPU;
	}

	/* BPF and PID/TID are mutually exclusive */
	if (target->bpf_str && target->tid) {
		target->tid = NULL;
		if (ret == TARGET_ERRNO__SUCCESS)
			ret = TARGET_ERRNO__BPF_OVERRIDE_PID;
	}

	/* BPF and UID are mutually exclusive */
	if (target->bpf_str && target->uid_str) {
		target->uid_str = NULL;
		if (ret == TARGET_ERRNO__SUCCESS)
			ret = TARGET_ERRNO__BPF_OVERRIDE_UID;
	}

	/* BPF and THREADS are mutually exclusive */
	if (target->bpf_str && target->per_thread) {
		target->per_thread = false;
		if (ret == TARGET_ERRNO__SUCCESS)
			ret = TARGET_ERRNO__BPF_OVERRIDE_THREAD;
	}

	/* THREAD and SYSTEM/CPU are mutually exclusive */
	if (target->per_thread && (target->system_wide || target->cpu_list)) {
		target->per_thread = false;
		if (ret == TARGET_ERRNO__SUCCESS)
			ret = TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD;
	}

	return ret;
}

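/*
 * A sketch of typical caller usage (hypothetical option values; the
 * struct target fields are declared in target.h):
 *
 *	struct target target = { .pid = "1234", .cpu_list = "0-3" };
 *	enum target_errno err = target__validate(&target);
 *
 *	if (err != TARGET_ERRNO__SUCCESS) {
 *		char errbuf[BUFSIZ];
 *
 *		target__strerror(&target, err, errbuf, sizeof(errbuf));
 *		fprintf(stderr, "Warning: %s\n", errbuf);
 *	}
 */
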
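/*
 * Translate target->uid_str into a numeric UID: first try it as a user
 * name via getpwnam_r(); failing that, parse it as a decimal UID and
 * look it up via getpwuid_r().
 */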
enum target_errno target__parse_uid(struct target *target)
{
	struct passwd pwd, *result;
	char buf[1024];
	const char *str = target->uid_str;

	target->uid = UINT_MAX;
	if (str == NULL)
		return TARGET_ERRNO__SUCCESS;

	/* Try user name first */
	getpwnam_r(str, &pwd, buf, sizeof(buf), &result);

	if (result == NULL) {
		/*
		 * The user name was not found.  Maybe it's a numeric UID.
		 */
		char *endptr;
		int uid = strtol(str, &endptr, 10);

		if (*endptr != '\0')
			return TARGET_ERRNO__INVALID_UID;

		getpwuid_r(uid, &pwd, buf, sizeof(buf), &result);

		if (result == NULL)
			return TARGET_ERRNO__USER_NOT_FOUND;
	}

	target->uid = result->pw_uid;
	return TARGET_ERRNO__SUCCESS;
}

/*
 * This must have the same ordering as the enum target_errno.
 */
static const char *target__error_str[] = {
	"PID/TID switch overriding CPU",
	"PID/TID switch overriding UID",
	"UID switch overriding CPU",
	"PID/TID switch overriding SYSTEM",
	"UID switch overriding SYSTEM",
	"SYSTEM/CPU switch overriding PER-THREAD",
	"BPF switch overriding CPU",
	"BPF switch overriding PID/TID",
	"BPF switch overriding UID",
	"BPF switch overriding THREAD",
	"Invalid User: %s",
	"Problems obtaining information for user %s",
};

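/*
 * Format an error message for 'errnum' into 'buf'.  Non-negative values
 * are treated as standard errno codes; negative values in the
 * __TARGET_ERRNO__ range index into target__error_str[] above.
 */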
int target__strerror(struct target *target, int errnum,
		     char *buf, size_t buflen)
{
	int idx;
	const char *msg;

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		str_error_r(errnum, buf, buflen);
		return 0;
	}

	if (errnum < __TARGET_ERRNO__START || errnum >= __TARGET_ERRNO__END)
		return -1;

	idx = errnum - __TARGET_ERRNO__START;
	msg = target__error_str[idx];

	switch (errnum) {
	case TARGET_ERRNO__PID_OVERRIDE_CPU ...
	     TARGET_ERRNO__BPF_OVERRIDE_THREAD:
		snprintf(buf, buflen, "%s", msg);
		break;

	case TARGET_ERRNO__INVALID_UID:
	case TARGET_ERRNO__USER_NOT_FOUND:
		snprintf(buf, buflen, msg, target->uid_str);
		break;

	default:
		/* cannot reach here */
		break;
	}

	return 0;
}