bpf: add a test case for helper bpf_perf_prog_read_value

The bpf sample program trace_event is enhanced to use the new
helper to print out enabled/running time.

Signed-off-by: Yonghong Song <yhs@fb.com>
Acked-by: Alexei Starovoitov <ast@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Yonghong Song 2017-10-05 09:19:23 -07:00 committed by David S. Miller
parent 4bebdc7a85
commit 81b9cf8028
4 changed files with 23 additions and 6 deletions

View File

@ -37,10 +37,14 @@ struct bpf_map_def SEC("maps") stackmap = {
SEC("perf_event") SEC("perf_event")
int bpf_prog1(struct bpf_perf_event_data *ctx) int bpf_prog1(struct bpf_perf_event_data *ctx)
{ {
char time_fmt1[] = "Time Enabled: %llu, Time Running: %llu";
char time_fmt2[] = "Get Time Failed, ErrCode: %d";
char fmt[] = "CPU-%d period %lld ip %llx"; char fmt[] = "CPU-%d period %lld ip %llx";
u32 cpu = bpf_get_smp_processor_id(); u32 cpu = bpf_get_smp_processor_id();
struct bpf_perf_event_value value_buf;
struct key_t key; struct key_t key;
u64 *val, one = 1; u64 *val, one = 1;
int ret;
if (ctx->sample_period < 10000) if (ctx->sample_period < 10000)
/* ignore warmup */ /* ignore warmup */
@ -54,6 +58,12 @@ int bpf_prog1(struct bpf_perf_event_data *ctx)
return 0; return 0;
} }
ret = bpf_perf_prog_read_value(ctx, (void *)&value_buf, sizeof(struct bpf_perf_event_value));
if (!ret)
bpf_trace_printk(time_fmt1, sizeof(time_fmt1), value_buf.enabled, value_buf.running);
else
bpf_trace_printk(time_fmt2, sizeof(time_fmt2), ret);
val = bpf_map_lookup_elem(&counts, &key); val = bpf_map_lookup_elem(&counts, &key);
if (val) if (val)
(*val)++; (*val)++;

View File

@ -127,6 +127,9 @@ static void test_perf_event_all_cpu(struct perf_event_attr *attr)
int *pmu_fd = malloc(nr_cpus * sizeof(int)); int *pmu_fd = malloc(nr_cpus * sizeof(int));
int i, error = 0; int i, error = 0;
/* system wide perf event, no need to inherit */
attr->inherit = 0;
/* open perf_event on all cpus */ /* open perf_event on all cpus */
for (i = 0; i < nr_cpus; i++) { for (i = 0; i < nr_cpus; i++) {
pmu_fd[i] = sys_perf_event_open(attr, -1, i, -1, 0); pmu_fd[i] = sys_perf_event_open(attr, -1, i, -1, 0);
@ -154,6 +157,11 @@ static void test_perf_event_task(struct perf_event_attr *attr)
{ {
int pmu_fd; int pmu_fd;
/* per task perf event, enable inherit so the "dd ..." command can be traced properly.
 * Note: enabling inherit will cause the bpf_perf_prog_read_value helper to fail.
 */
attr->inherit = 1;
/* open task bound event */ /* open task bound event */
pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0); pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
if (pmu_fd < 0) { if (pmu_fd < 0) {
@ -175,14 +183,12 @@ static void test_bpf_perf_event(void)
.freq = 1, .freq = 1,
.type = PERF_TYPE_HARDWARE, .type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES, .config = PERF_COUNT_HW_CPU_CYCLES,
.inherit = 1,
}; };
struct perf_event_attr attr_type_sw = { struct perf_event_attr attr_type_sw = {
.sample_freq = SAMPLE_FREQ, .sample_freq = SAMPLE_FREQ,
.freq = 1, .freq = 1,
.type = PERF_TYPE_SOFTWARE, .type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK, .config = PERF_COUNT_SW_CPU_CLOCK,
.inherit = 1,
}; };
struct perf_event_attr attr_hw_cache_l1d = { struct perf_event_attr attr_hw_cache_l1d = {
.sample_freq = SAMPLE_FREQ, .sample_freq = SAMPLE_FREQ,
@ -192,7 +198,6 @@ static void test_bpf_perf_event(void)
PERF_COUNT_HW_CACHE_L1D | PERF_COUNT_HW_CACHE_L1D |
(PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16), (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
.inherit = 1,
}; };
struct perf_event_attr attr_hw_cache_branch_miss = { struct perf_event_attr attr_hw_cache_branch_miss = {
.sample_freq = SAMPLE_FREQ, .sample_freq = SAMPLE_FREQ,
@ -202,7 +207,6 @@ static void test_bpf_perf_event(void)
PERF_COUNT_HW_CACHE_BPU | PERF_COUNT_HW_CACHE_BPU |
(PERF_COUNT_HW_CACHE_OP_READ << 8) | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16), (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
.inherit = 1,
}; };
struct perf_event_attr attr_type_raw = { struct perf_event_attr attr_type_raw = {
.sample_freq = SAMPLE_FREQ, .sample_freq = SAMPLE_FREQ,
@ -210,7 +214,6 @@ static void test_bpf_perf_event(void)
.type = PERF_TYPE_RAW, .type = PERF_TYPE_RAW,
/* Intel Instruction Retired */ /* Intel Instruction Retired */
.config = 0xc0, .config = 0xc0,
.inherit = 1,
}; };
printf("Test HW_CPU_CYCLES\n"); printf("Test HW_CPU_CYCLES\n");

View File

@ -698,7 +698,8 @@ union bpf_attr {
FN(sk_redirect_map), \ FN(sk_redirect_map), \
FN(sock_map_update), \ FN(sock_map_update), \
FN(xdp_adjust_meta), \ FN(xdp_adjust_meta), \
FN(perf_event_read_value), FN(perf_event_read_value), \
FN(perf_prog_read_value),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper /* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call * function eBPF program intends to call

View File

@ -75,6 +75,9 @@ static int (*bpf_sock_map_update)(void *map, void *key, void *value,
static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags, static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
void *buf, unsigned int buf_size) = void *buf, unsigned int buf_size) =
(void *) BPF_FUNC_perf_event_read_value; (void *) BPF_FUNC_perf_event_read_value;
static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
unsigned int buf_size) =
(void *) BPF_FUNC_perf_prog_read_value;
/* llvm builtin functions that eBPF C program may use to /* llvm builtin functions that eBPF C program may use to