Merge branch 'Add bpf_get_func_ip helper'
Jiri Olsa says:

====================
Add a bpf_get_func_ip helper that returns the IP address of the caller
function for trampoline and kprobe programs.

There are two specific implementations of the bpf_get_func_ip helper:
one for trampoline programs and one for kprobe/kretprobe programs. The
trampoline helper call is replaced/inlined by the verifier with a
simple move instruction, while the kprobe/kretprobe variant is an
actual helper call that returns the prepared caller address.

Also available at:
  https://git.kernel.org/pub/scm/linux/kernel/git/jolsa/perf.git
  bpf/get_func_ip

v4 changes:
  - dropped the jit/x86 check from the get_func_ip tracing check [Alexei]
  - added code to bpf_get_func_ip_tracing [Alexei]
    and tested that it works without inlining [Alexei]
  - changed has_get_func_ip to check_get_func_ip [Andrii]
  - replaced the test assert loop with explicit asserts [Andrii]
  - added the bpf_program__attach_kprobe_opts function
    and used it for offset setup [Andrii]
  - used bpf_program__set_autoload(false) for test6 [Andrii]
  - added Masami's ack

v3 changes:
  - resend with Masami in cc and v3 in each patch subject

v2 changes:
  - use kprobe_running to get the kprobe instead of a cpu var [Masami]
  - added support to add a kprobe on function+offset
    and a test for that [Alan]
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
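For illustration, a minimal sketch of how the new helper is meant to be
used from a kprobe program (the probed function and the printed message
are arbitrary examples, not part of this series):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* both helper variants are gpl_only, so a GPL license is required */
char _license[] SEC("license") = "GPL";

SEC("kprobe/schedule")
int probe_entry(struct pt_regs *ctx)
{
	/* for kprobes this resolves to kprobe_running()->addr in the
	 * kernel; for fentry/fexit programs the same call is inlined
	 * by the verifier into a single load from the trampoline stack
	 */
	__u64 ip = bpf_get_func_ip(ctx);

	bpf_printk("entered function at %lx", ip);
	return 0;
}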
commit 1554a080e7
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1951,6 +1951,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	if (flags & BPF_TRAMP_F_CALL_ORIG)
 		stack_size += 8; /* room for return value of orig_call */
 
+	if (flags & BPF_TRAMP_F_IP_ARG)
+		stack_size += 8; /* room for IP address argument */
+
 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
 		/* skip patched call instruction and point orig_call to actual
 		 * body of the kernel function.
@@ -1964,6 +1967,22 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
 	EMIT1(0x53);		 /* push rbx */
 
+	if (flags & BPF_TRAMP_F_IP_ARG) {
+		/* Store IP address of the traced function:
+		 * mov rax, QWORD PTR [rbp + 8]
+		 * sub rax, X86_PATCH_SIZE
+		 * mov QWORD PTR [rbp - stack_size], rax
+		 */
+		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
+		EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
+		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -stack_size);
+
+		/* Continue with stack_size for regs storage, stack will
+		 * be correctly restored with 'leave' instruction.
+		 */
+		stack_size -= 8;
+	}
+
 	save_regs(m, &prog, nr_args, stack_size);
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -579,6 +579,11 @@ struct btf_func_model {
  */
 #define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
 
+/* Store IP address of the caller on the trampoline stack,
+ * so it's available for trampoline's programs.
+ */
+#define BPF_TRAMP_F_IP_ARG		BIT(3)
+
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
  */
diff --git a/include/linux/filter.h b/include/linux/filter.h
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -559,7 +559,8 @@ struct bpf_prog {
 				kprobe_override:1, /* Do we override a kprobe? */
 				has_callchain_buf:1, /* callchain buffer allocated? */
 				enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
-				call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */
+				call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
+				call_get_func_ip:1; /* Do we call get_func_ip() */
 	enum bpf_prog_type	type;		/* Type of BPF program */
 	enum bpf_attach_type	expected_attach_type; /* For some prog types */
 	u32			len;		/* Number of filter blocks */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4841,6 +4841,12 @@ union bpf_attr {
  *		**-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
  *		**-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
  *		own timer which would have led to a deadlock otherwise.
+ *
+ * u64 bpf_get_func_ip(void *ctx)
+ *	Description
+ *		Get address of the traced function (for tracing and kprobe programs).
+ *	Return
+ *		Address of the traced function.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5016,6 +5022,7 @@ union bpf_attr {
 	FN(timer_set_callback),		\
 	FN(timer_start),		\
 	FN(timer_cancel),		\
+	FN(get_func_ip),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -172,7 +172,7 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
 }
 
 static struct bpf_tramp_progs *
-bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
+bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
 {
 	const struct bpf_prog_aux *aux;
 	struct bpf_tramp_progs *tprogs;
@@ -189,8 +189,10 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
 		*total += tr->progs_cnt[kind];
 		progs = tprogs[kind].progs;
 
-		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist)
+		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist) {
+			*ip_arg |= aux->prog->call_get_func_ip;
 			*progs++ = aux->prog;
+		}
 	}
 	return tprogs;
 }
@@ -333,9 +335,10 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
 	struct bpf_tramp_image *im;
 	struct bpf_tramp_progs *tprogs;
 	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
+	bool ip_arg = false;
 	int err, total;
 
-	tprogs = bpf_trampoline_get_progs(tr, &total);
+	tprogs = bpf_trampoline_get_progs(tr, &total, &ip_arg);
 	if (IS_ERR(tprogs))
 		return PTR_ERR(tprogs);
 
@@ -357,6 +360,9 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
 	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
 		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 
+	if (ip_arg)
+		flags |= BPF_TRAMP_F_IP_ARG;
+
 	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
 					  &tr->func.model, flags, tprogs,
 					  tr->func.addr);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6161,6 +6161,29 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
 	return err;
 }
 
+static int check_get_func_ip(struct bpf_verifier_env *env)
+{
+	enum bpf_attach_type eatype = env->prog->expected_attach_type;
+	enum bpf_prog_type type = resolve_prog_type(env->prog);
+	int func_id = BPF_FUNC_get_func_ip;
+
+	if (type == BPF_PROG_TYPE_TRACING) {
+		if (eatype != BPF_TRACE_FENTRY && eatype != BPF_TRACE_FEXIT &&
+		    eatype != BPF_MODIFY_RETURN) {
+			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
+				func_id_name(func_id), func_id);
+			return -ENOTSUPP;
+		}
+		return 0;
+	} else if (type == BPF_PROG_TYPE_KPROBE) {
+		return 0;
+	}
+
+	verbose(env, "func %s#%d not supported for program type %d\n",
+		func_id_name(func_id), func_id, type);
+	return -ENOTSUPP;
+}
+
 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			     int *insn_idx_p)
 {
@@ -6439,6 +6462,12 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
 		env->prog->call_get_stack = true;
 
+	if (func_id == BPF_FUNC_get_func_ip) {
+		if (check_get_func_ip(env))
+			return -ENOTSUPP;
+		env->prog->call_get_func_ip = true;
+	}
+
 	if (changes_data)
 		clear_all_pkt_pointers(env);
 	return 0;
@@ -12632,6 +12661,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 {
 	struct bpf_prog *prog = env->prog;
 	bool expect_blinding = bpf_jit_blinding_enabled(prog);
+	enum bpf_prog_type prog_type = resolve_prog_type(prog);
 	struct bpf_insn *insn = prog->insnsi;
 	const struct bpf_func_proto *fn;
 	const int insn_cnt = prog->len;
@@ -12998,6 +13028,21 @@ patch_map_ops_generic:
 			continue;
 		}
 
+		/* Implement bpf_get_func_ip inline. */
+		if (prog_type == BPF_PROG_TYPE_TRACING &&
+		    insn->imm == BPF_FUNC_get_func_ip) {
+			/* Load IP address from ctx - 8 */
+			insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
+			if (!new_prog)
+				return -ENOMEM;
+
+			env->prog = prog = new_prog;
+			insn = new_prog->insnsi + i + delta;
+			continue;
+		}
+
 patch_call_imm:
 		fn = env->ops->get_func_proto(insn->imm, env->prog);
 		/* all functions that have prototype and verifier allowed
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -948,6 +948,33 @@ const struct bpf_func_proto bpf_snprintf_btf_proto = {
 	.arg5_type	= ARG_ANYTHING,
 };
 
+BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
+{
+	/* This helper call is inlined by verifier. */
+	return ((u64 *)ctx)[-1];
+}
+
+static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
+	.func		= bpf_get_func_ip_tracing,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
+{
+	struct kprobe *kp = kprobe_running();
+
+	return kp ? (u64) kp->addr : 0;
+}
+
+static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
+	.func		= bpf_get_func_ip_kprobe,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+};
+
 const struct bpf_func_proto *
 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -1058,6 +1085,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_for_each_map_elem_proto;
 	case BPF_FUNC_snprintf:
 		return &bpf_snprintf_proto;
+	case BPF_FUNC_get_func_ip:
+		return &bpf_get_func_ip_proto_tracing;
 	default:
 		return bpf_base_func_proto(func_id);
 	}
@@ -1077,6 +1106,8 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_override_return:
 		return &bpf_override_return_proto;
 #endif
+	case BPF_FUNC_get_func_ip:
+		return &bpf_get_func_ip_proto_kprobe;
 	default:
 		return bpf_tracing_func_proto(func_id, prog);
 	}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4841,6 +4841,12 @@ union bpf_attr {
 *		**-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
 *		**-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
 *		own timer which would have led to a deadlock otherwise.
+ *
+ * u64 bpf_get_func_ip(void *ctx)
+ *	Description
+ *		Get address of the traced function (for tracing and kprobe programs).
+ *	Return
+ *		Address of the traced function.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5016,6 +5022,7 @@ union bpf_attr {
 	FN(timer_set_callback),		\
 	FN(timer_start),		\
 	FN(timer_cancel),		\
+	FN(get_func_ip),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -10346,19 +10346,25 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
 	return pfd;
 }
 
-struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
-					    bool retprobe,
-					    const char *func_name)
+struct bpf_program_attach_kprobe_opts {
+	bool retprobe;
+	unsigned long offset;
+};
+
+static struct bpf_link*
+bpf_program__attach_kprobe_opts(struct bpf_program *prog,
+				const char *func_name,
+				struct bpf_program_attach_kprobe_opts *opts)
 {
 	char errmsg[STRERR_BUFSIZE];
 	struct bpf_link *link;
 	int pfd, err;
 
-	pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
-				    0 /* offset */, -1 /* pid */);
+	pfd = perf_event_open_probe(false /* uprobe */, opts->retprobe, func_name,
+				    opts->offset, -1 /* pid */);
 	if (pfd < 0) {
 		pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
-			prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
+			prog->name, opts->retprobe ? "kretprobe" : "kprobe", func_name,
 			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
 		return libbpf_err_ptr(pfd);
 	}
@@ -10367,23 +10373,53 @@ struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
 	if (err) {
 		close(pfd);
 		pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
-			prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
+			prog->name, opts->retprobe ? "kretprobe" : "kprobe", func_name,
 			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
 		return libbpf_err_ptr(err);
 	}
 	return link;
 }
 
+struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
+					    bool retprobe,
+					    const char *func_name)
+{
+	struct bpf_program_attach_kprobe_opts opts = {
+		.retprobe = retprobe,
+	};
+
+	return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
+}
+
 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
 				      struct bpf_program *prog)
 {
+	struct bpf_program_attach_kprobe_opts opts;
+	unsigned long offset = 0;
+	struct bpf_link *link;
 	const char *func_name;
-	bool retprobe;
+	char *func;
+	int n, err;
 
 	func_name = prog->sec_name + sec->len;
-	retprobe = strcmp(sec->sec, "kretprobe/") == 0;
+	opts.retprobe = strcmp(sec->sec, "kretprobe/") == 0;
 
-	return bpf_program__attach_kprobe(prog, retprobe, func_name);
+	n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%lx", &func, &offset);
+	if (n < 1) {
+		err = -EINVAL;
+		pr_warn("kprobe name is invalid: %s\n", func_name);
+		return libbpf_err_ptr(err);
+	}
+	if (opts.retprobe && offset != 0) {
+		err = -EINVAL;
+		pr_warn("kretprobes do not support offset specification\n");
+		return libbpf_err_ptr(err);
+	}
+
+	opts.offset = offset;
+	link = bpf_program__attach_kprobe_opts(prog, func, &opts);
+	free(func);
+	return link;
 }
 
 struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
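With the attach_kprobe() parsing above, an offset can now be requested
directly in the program's section name ("kprobe/<func>+0x<offset>"), as
test6 in the selftests below does. A hypothetical userspace caller
(the my_probe skeleton name is illustrative, not part of this series)
only needs the usual skeleton flow:

#include "my_probe.skel.h"	/* hypothetical generated skeleton */

int attach_example(void)
{
	struct my_probe *skel = my_probe__open_and_load();

	if (!skel)
		return -1;
	/* auto-attach parses the "+0x<offset>" suffix via attach_kprobe() */
	if (my_probe__attach(skel)) {
		my_probe__destroy(skel);
		return -1;
	}
	return 0;
}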
diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "get_func_ip_test.skel.h"
+
+void test_get_func_ip_test(void)
+{
+	struct get_func_ip_test *skel = NULL;
+	__u32 duration = 0, retval;
+	int err, prog_fd;
+
+	skel = get_func_ip_test__open();
+	if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))
+		return;
+
+	/* test6 is x86_64 specific because of the instruction
+	 * offset, disabling it for all other archs
+	 */
+#ifndef __x86_64__
+	bpf_program__set_autoload(skel->progs.test6, false);
+#endif
+
+	err = get_func_ip_test__load(skel);
+	if (!ASSERT_OK(err, "get_func_ip_test__load"))
+		goto cleanup;
+
+	err = get_func_ip_test__attach(skel);
+	if (!ASSERT_OK(err, "get_func_ip_test__attach"))
+		goto cleanup;
+
+	prog_fd = bpf_program__fd(skel->progs.test1);
+	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+				NULL, NULL, &retval, &duration);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(retval, 0, "test_run");
+
+	prog_fd = bpf_program__fd(skel->progs.test5);
+	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+				NULL, NULL, &retval, &duration);
+
+	ASSERT_OK(err, "test_run");
+
+	ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");
+	ASSERT_EQ(skel->bss->test2_result, 1, "test2_result");
+	ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");
+	ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");
+	ASSERT_EQ(skel->bss->test5_result, 1, "test5_result");
+#ifdef __x86_64__
+	ASSERT_EQ(skel->bss->test6_result, 1, "test6_result");
+#endif
+
+cleanup:
+	get_func_ip_test__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+extern const void bpf_fentry_test1 __ksym;
+extern const void bpf_fentry_test2 __ksym;
+extern const void bpf_fentry_test3 __ksym;
+extern const void bpf_fentry_test4 __ksym;
+extern const void bpf_modify_return_test __ksym;
+extern const void bpf_fentry_test6 __ksym;
+
+__u64 test1_result = 0;
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test1, int a)
+{
+	__u64 addr = bpf_get_func_ip(ctx);
+
+	test1_result = (const void *) addr == &bpf_fentry_test1;
+	return 0;
+}
+
+__u64 test2_result = 0;
+SEC("fexit/bpf_fentry_test2")
+int BPF_PROG(test2, int a)
+{
+	__u64 addr = bpf_get_func_ip(ctx);
+
+	test2_result = (const void *) addr == &bpf_fentry_test2;
+	return 0;
+}
+
+__u64 test3_result = 0;
+SEC("kprobe/bpf_fentry_test3")
+int test3(struct pt_regs *ctx)
+{
+	__u64 addr = bpf_get_func_ip(ctx);
+
+	test3_result = (const void *) addr == &bpf_fentry_test3;
+	return 0;
+}
+
+__u64 test4_result = 0;
+SEC("kretprobe/bpf_fentry_test4")
+int BPF_KRETPROBE(test4)
+{
+	__u64 addr = bpf_get_func_ip(ctx);
+
+	test4_result = (const void *) addr == &bpf_fentry_test4;
+	return 0;
+}
+
+__u64 test5_result = 0;
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(test5, int a, int *b, int ret)
+{
+	__u64 addr = bpf_get_func_ip(ctx);
+
+	test5_result = (const void *) addr == &bpf_modify_return_test;
+	return ret;
+}
+
+__u64 test6_result = 0;
+SEC("kprobe/bpf_fentry_test6+0x5")
+int test6(struct pt_regs *ctx)
+{
+	__u64 addr = bpf_get_func_ip(ctx);
+
+	test6_result = (const void *) addr == &bpf_fentry_test6 + 5;
+	return 0;
+}