bpf: Introduce helper bpf_get_branch_snapshot
Introduce bpf_get_branch_snapshot(), which allows a tracing program to get the branch trace from hardware (e.g. Intel LBR). To use the feature, the user needs to create a perf_event with proper branch_record filtering on each CPU, and then call bpf_get_branch_snapshot in the BPF function. On Intel CPUs, the VLBR event (raw event 0x1b00) can be used for this.

Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210910183352.3151445-3-songliubraving@fb.com
parent c22ac2a3d4
commit 856c02dbce
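As context for the commit message above, a minimal user-space sketch of the per-CPU perf_event setup might look like the following. Only the raw config 0x1b00 comes from the commit message; the branch_sample_type flags and the function name are illustrative assumptions, not part of this patch.

/* Hypothetical user-space sketch: open the Intel VLBR event (raw config
 * 0x1b00, per the commit message) on each online CPU so that LBR recording
 * is active when a BPF program later calls bpf_get_branch_snapshot().
 * The branch_sample_type choice is an assumption for illustration.
 */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_vlbr_event_on_cpu(int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x1b00;			/* Intel VLBR raw event */
	attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL |
				  PERF_SAMPLE_BRANCH_ANY_RETURN;

	/* pid == -1, cpu == <cpu>: count any task on this CPU
	 * (needs sufficient perf privileges).
	 */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}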
include/uapi/linux/bpf.h
@@ -4877,6 +4877,27 @@ union bpf_attr {
  *		Get the struct pt_regs associated with **task**.
  *	Return
  *		A pointer to struct pt_regs.
+ *
+ * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
+ *	Description
+ *		Get branch trace from hardware engines like Intel LBR. The
+ *		hardware engine is stopped shortly after the helper is
+ *		called. Therefore, the user need to filter branch entries
+ *		based on the actual use case. To capture branch trace
+ *		before the trigger point of the BPF program, the helper
+ *		should be called at the beginning of the BPF program.
+ *
+ *		The data is stored as struct perf_branch_entry into output
+ *		buffer *entries*. *size* is the size of *entries* in bytes.
+ *		*flags* is reserved for now and must be zero.
+ *
+ *	Return
+ *		On success, number of bytes written to *buf*. On error, a
+ *		negative value.
+ *
+ *		**-EINVAL** if *flags* is not zero.
+ *
+ *		**-ENOENT** if architecture does not support branch records.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5055,6 +5076,7 @@ union bpf_attr {
 	FN(get_func_ip),		\
 	FN(get_attach_cookie),		\
 	FN(task_pt_regs),		\
+	FN(get_branch_snapshot),	\
 	/* */

 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
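To illustrate the helper documentation above, a minimal BPF program sketch (not part of this patch) could call the helper at its entry point and convert the returned byte count into an entry count. The attach point, buffer size, and global variables below are assumptions.

/* Hypothetical BPF-side sketch: call bpf_get_branch_snapshot() first thing
 * in the program, as the documentation recommends, then derive the number
 * of captured entries from the returned byte count.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";	/* the helper is gpl_only */

#define MAX_LBR_ENTRIES 32

struct perf_branch_entry entries[MAX_LBR_ENTRIES] = {};
long total_entries = 0;

SEC("fentry/kernel_clone")		/* illustrative attach point */
int BPF_PROG(snapshot_branches)
{
	long ret;

	/* Snapshot as early as possible so the branch records still cover
	 * the path that led into this program's trigger point.
	 */
	ret = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
	if (ret < 0)
		return 0;

	/* The helper returns bytes written; convert to an entry count. */
	total_entries = ret / sizeof(struct perf_branch_entry);
	return 0;
}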
kernel/bpf/trampoline.c
@@ -10,6 +10,7 @@
 #include <linux/rcupdate_trace.h>
 #include <linux/rcupdate_wait.h>
 #include <linux/module.h>
+#include <linux/static_call.h>

 /* dummy _ops. The verifier will operate on target program's ops. */
 const struct bpf_verifier_ops bpf_extension_verifier_ops = {
@@ -526,7 +527,7 @@ out:
 }

 #define NO_START_TIME 1
-static u64 notrace bpf_prog_start_time(void)
+static __always_inline u64 notrace bpf_prog_start_time(void)
 {
 	u64 start = NO_START_TIME;

kernel/trace/bpf_trace.c
@@ -1017,6 +1017,34 @@ static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 };

+BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
+{
+#ifndef CONFIG_X86
+	return -ENOENT;
+#else
+	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
+	u32 entry_cnt = size / br_entry_size;
+
+	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
+
+	if (unlikely(flags))
+		return -EINVAL;
+
+	if (!entry_cnt)
+		return -ENOENT;
+
+	return entry_cnt * br_entry_size;
+#endif
+}
+
+static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
+	.func		= bpf_get_branch_snapshot,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
+};
+
 static const struct bpf_func_proto *
 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -1132,6 +1160,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_snprintf_proto;
 	case BPF_FUNC_get_func_ip:
 		return &bpf_get_func_ip_proto_tracing;
+	case BPF_FUNC_get_branch_snapshot:
+		return &bpf_get_branch_snapshot_proto;
 	default:
 		return bpf_base_func_proto(func_id);
 	}
tools/include/uapi/linux/bpf.h
@@ -4877,6 +4877,27 @@ union bpf_attr {
  *		Get the struct pt_regs associated with **task**.
  *	Return
  *		A pointer to struct pt_regs.
+ *
+ * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
+ *	Description
+ *		Get branch trace from hardware engines like Intel LBR. The
+ *		hardware engine is stopped shortly after the helper is
+ *		called. Therefore, the user need to filter branch entries
+ *		based on the actual use case. To capture branch trace
+ *		before the trigger point of the BPF program, the helper
+ *		should be called at the beginning of the BPF program.
+ *
+ *		The data is stored as struct perf_branch_entry into output
+ *		buffer *entries*. *size* is the size of *entries* in bytes.
+ *		*flags* is reserved for now and must be zero.
+ *
+ *	Return
+ *		On success, number of bytes written to *buf*. On error, a
+ *		negative value.
+ *
+ *		**-EINVAL** if *flags* is not zero.
+ *
+ *		**-ENOENT** if architecture does not support branch records.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5055,6 +5076,7 @@ union bpf_attr {
 	FN(get_func_ip),		\
 	FN(get_attach_cookie),		\
 	FN(task_pt_regs),		\
+	FN(get_branch_snapshot),	\
 	/* */

 /* integer value in 'imm' field of BPF_CALL instruction selects which helper