bpf, trace: fetch current cpu only once
We currently have two invocations that fetch the current CPU, which is unnecessary. Fetch it only once and use the smp_processor_id() variant, so we also get preemption checks along with it when DEBUG_PREEMPT is set.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit d793133031
parent 1ca1cc98bf
@@ -233,6 +233,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 	struct pt_regs *regs = (struct pt_regs *) (long) r1;
 	struct bpf_map *map = (struct bpf_map *) (long) r2;
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	unsigned int cpu = smp_processor_id();
 	u64 index = flags & BPF_F_INDEX_MASK;
 	void *data = (void *) (long) r4;
 	struct perf_sample_data sample_data;
@@ -246,7 +247,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 		return -EINVAL;
 	if (index == BPF_F_CURRENT_CPU)
-		index = raw_smp_processor_id();
+		index = cpu;
 	if (unlikely(index >= array->map.max_entries))
 		return -E2BIG;
 
@@ -259,7 +260,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
 		return -EINVAL;
 
-	if (unlikely(event->oncpu != smp_processor_id()))
+	if (unlikely(event->oncpu != cpu))
 		return -EOPNOTSUPP;
 
 	perf_sample_data_init(&sample_data, 0, 0);
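For background, a minimal sketch (not part of the commit) of why the checked variant is used here: with CONFIG_DEBUG_PREEMPT enabled, smp_processor_id() resolves to a debug helper that warns when the caller could still be migrated to another CPU, while raw_smp_processor_id() skips that check. The function below is hypothetical and only illustrates the pattern of reading the CPU number once and reusing it.

#include <linux/preempt.h>
#include <linux/smp.h>

/* Illustrative helper only; not kernel code from this commit. */
static void example_fetch_cpu_once(void)
{
	unsigned int cpu;

	preempt_disable();

	/*
	 * Read the CPU number a single time. With CONFIG_DEBUG_PREEMPT,
	 * smp_processor_id() additionally verifies that the caller
	 * cannot migrate, which raw_smp_processor_id() would not check.
	 */
	cpu = smp_processor_id();

	/* Reuse 'cpu' for all later comparisons instead of re-reading it. */
	(void)cpu;

	preempt_enable();
}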