bpf: Remove prog->active check for bpf_lsm and bpf_iter

[ Upstream commit 271de525e1 ]

The commit 64696c40d0 ("bpf: Add __bpf_prog_{enter,exit}_struct_ops for struct_ops trampoline")
removed the prog->active check for struct_ops progs. The bpf_lsm and bpf_iter
programs also use trampolines. Like struct_ops, bpf_lsm and bpf_iter have fixed
hooks for the prog to attach to, and the kernel does not call the same hook
recursively. This patch therefore also removes the prog->active check for
bpf_lsm and bpf_iter.

A later patch has a test to reproduce the recursion issue for a sleepable
bpf_lsm program.

This patch appends the '_recur' suffix to the existing enter and exit functions
that track the prog->active counter. New __bpf_prog_{enter,exit}[_sleepable]
functions are added that skip the prog->active tracking. The '_struct_ops'
version is removed.

It also moves the decision on picking the enter and exit function to the new
bpf_trampoline_{enter,exit}(). It returns the '_recur' ones for all tracing
progs to use. For bpf_lsm, bpf_iter, struct_ops (no prog->active tracking after
64696c40d0), and bpf_lsm_cgroup (no prog->active tracking after 69fd337a97),
it returns the functions that do not track prog->active.

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20221025184524.3526117-2-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Stable-dep-of: 7645629f7d ("bpf: Invoke __bpf_prog_exit_sleepable_recur() on recursion in kern_sys_bpf().")
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 5f09b79e99
commit a12f15d1f8
arch/arm64/net/bpf_jit_comp.c

@@ -1655,13 +1655,8 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
 	struct bpf_prog *p = l->link.prog;
 	int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
 
-	if (p->aux->sleepable) {
-		enter_prog = (u64)__bpf_prog_enter_sleepable;
-		exit_prog = (u64)__bpf_prog_exit_sleepable;
-	} else {
-		enter_prog = (u64)__bpf_prog_enter;
-		exit_prog = (u64)__bpf_prog_exit;
-	}
+	enter_prog = (u64)bpf_trampoline_enter(p);
+	exit_prog = (u64)bpf_trampoline_exit(p);
 
 	if (l->cookie == 0) {
 		/* if cookie is zero, one instruction is enough to store it */
arch/x86/net/bpf_jit_comp.c

@@ -1813,10 +1813,6 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 			   struct bpf_tramp_link *l, int stack_size,
 			   int run_ctx_off, bool save_ret)
 {
-	void (*exit)(struct bpf_prog *prog, u64 start,
-		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_exit;
-	u64 (*enter)(struct bpf_prog *prog,
-		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_enter;
 	u8 *prog = *pprog;
 	u8 *jmp_insn;
 	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
@@ -1835,23 +1831,12 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	 */
 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
 
-	if (p->aux->sleepable) {
-		enter = __bpf_prog_enter_sleepable;
-		exit = __bpf_prog_exit_sleepable;
-	} else if (p->type == BPF_PROG_TYPE_STRUCT_OPS) {
-		enter = __bpf_prog_enter_struct_ops;
-		exit = __bpf_prog_exit_struct_ops;
-	} else if (p->expected_attach_type == BPF_LSM_CGROUP) {
-		enter = __bpf_prog_enter_lsm_cgroup;
-		exit = __bpf_prog_exit_lsm_cgroup;
-	}
-
 	/* arg1: mov rdi, progs[i] */
 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
 	/* arg2: lea rsi, [rbp - ctx_cookie_off] */
 	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
 
-	if (emit_call(&prog, enter, prog))
+	if (emit_call(&prog, bpf_trampoline_enter(p), prog))
 		return -EINVAL;
 	/* remember prog start time returned by __bpf_prog_enter */
 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@@ -1896,7 +1881,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
 	/* arg3: lea rdx, [rbp - run_ctx_off] */
 	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
-	if (emit_call(&prog, exit, prog))
+	if (emit_call(&prog, bpf_trampoline_exit(p), prog))
 		return -EINVAL;
 
 	*pprog = prog;
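For orientation, the call sequence that invoke_bpf_prog() emits around each
prog can be read as roughly the following C. This is a schematic sketch only
(names such as args are illustrative); the real code is emitted as machine
instructions by the JIT:

	/* Rough C equivalent of the emitted per-prog sequence: the enter
	 * helper's return value decides whether the prog body runs, and is
	 * handed back to the exit helper as the start time.
	 */
	u64 start;

	run_ctx.bpf_cookie = l->cookie;
	start = bpf_trampoline_enter(p)(p, &run_ctx);
	if (start)
		p->bpf_func(args);	/* skipped when enter returns 0 */
	bpf_trampoline_exit(p)(p, start, &run_ctx);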
include/linux/bpf.h

@@ -862,22 +862,18 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *i
 				const struct btf_func_model *m, u32 flags,
 				struct bpf_tramp_links *tlinks,
 				void *orig_call);
 /* these two functions are called from generated trampoline */
-u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx);
-u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
-				       struct bpf_tramp_run_ctx *run_ctx);
-u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
-					struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
-					struct bpf_tramp_run_ctx *run_ctx);
-u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
-					struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
-					struct bpf_tramp_run_ctx *run_ctx);
+u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
+					     struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
+					     struct bpf_tramp_run_ctx *run_ctx);
 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
 void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
+typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
+				      struct bpf_tramp_run_ctx *run_ctx);
+typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
+				      struct bpf_tramp_run_ctx *run_ctx);
+bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
+bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
 
 struct bpf_ksym {
 	unsigned long start;
include/linux/bpf_verifier.h

@@ -648,4 +648,17 @@ static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
 	       prog->aux->dst_prog->type : prog->type;
 }
 
+static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
+{
+	switch (resolve_prog_type(prog)) {
+	case BPF_PROG_TYPE_TRACING:
+		return prog->expected_attach_type != BPF_TRACE_ITER;
+	case BPF_PROG_TYPE_STRUCT_OPS:
+	case BPF_PROG_TYPE_LSM:
+		return false;
+	default:
+		return true;
+	}
+}
+
 #endif /* _LINUX_BPF_VERIFIER_H */
kernel/bpf/syscall.c

@@ -5136,13 +5136,14 @@ int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
 
 		run_ctx.bpf_cookie = 0;
 		run_ctx.saved_run_ctx = NULL;
-		if (!__bpf_prog_enter_sleepable(prog, &run_ctx)) {
+		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
 			/* recursion detected */
 			bpf_prog_put(prog);
 			return -EBUSY;
 		}
 		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
-		__bpf_prog_exit_sleepable(prog, 0 /* bpf_prog_run does runtime stats */, &run_ctx);
+		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
+						&run_ctx);
 		bpf_prog_put(prog);
 		return 0;
 #endif
kernel/bpf/trampoline.c

@@ -874,7 +874,7 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
  * [2..MAX_U64] - execute bpf prog and record execution time.
  *     This is start time.
  */
-u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
+static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
 	__acquires(RCU)
 {
 	rcu_read_lock();
@@ -911,7 +911,8 @@ static void notrace update_prog_stats(struct bpf_prog *prog,
 	}
 }
 
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx)
+static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
+					  struct bpf_tramp_run_ctx *run_ctx)
 	__releases(RCU)
 {
 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
@@ -922,8 +923,8 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_
 	rcu_read_unlock();
 }
 
-u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
-					struct bpf_tramp_run_ctx *run_ctx)
+static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
+					       struct bpf_tramp_run_ctx *run_ctx)
 	__acquires(RCU)
 {
 	/* Runtime stats are exported via actual BPF_LSM_CGROUP
@@ -937,8 +938,8 @@ u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
 	return NO_START_TIME;
 }
 
-void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
-					struct bpf_tramp_run_ctx *run_ctx)
+static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
+					       struct bpf_tramp_run_ctx *run_ctx)
 	__releases(RCU)
 {
 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
@@ -947,7 +948,8 @@ void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
 	rcu_read_unlock();
 }
 
-u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
+u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
+					     struct bpf_tramp_run_ctx *run_ctx)
 {
 	rcu_read_lock_trace();
 	migrate_disable();
@@ -963,8 +965,8 @@ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_r
 	return bpf_prog_start_time();
 }
 
-void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
-				       struct bpf_tramp_run_ctx *run_ctx)
+void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
+					     struct bpf_tramp_run_ctx *run_ctx)
 {
 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
@@ -974,8 +976,30 @@ void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
 	rcu_read_unlock_trace();
 }
 
-u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
-					struct bpf_tramp_run_ctx *run_ctx)
+static u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog,
+					      struct bpf_tramp_run_ctx *run_ctx)
+{
+	rcu_read_lock_trace();
+	migrate_disable();
+	might_fault();
+
+	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
+
+	return bpf_prog_start_time();
+}
+
+static void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
+					      struct bpf_tramp_run_ctx *run_ctx)
+{
+	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
+
+	update_prog_stats(prog, start);
+	migrate_enable();
+	rcu_read_unlock_trace();
+}
+
+static u64 notrace __bpf_prog_enter(struct bpf_prog *prog,
+				    struct bpf_tramp_run_ctx *run_ctx)
 	__acquires(RCU)
 {
 	rcu_read_lock();
@@ -986,8 +1010,8 @@ u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
 	return bpf_prog_start_time();
 }
 
-void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
-					struct bpf_tramp_run_ctx *run_ctx)
+static void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start,
+				    struct bpf_tramp_run_ctx *run_ctx)
 	__releases(RCU)
 {
 	bpf_reset_run_ctx(run_ctx->saved_run_ctx);
@@ -1007,6 +1031,36 @@ void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
 	percpu_ref_put(&tr->pcref);
 }
 
+bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
+{
+	bool sleepable = prog->aux->sleepable;
+
+	if (bpf_prog_check_recur(prog))
+		return sleepable ? __bpf_prog_enter_sleepable_recur :
+			__bpf_prog_enter_recur;
+
+	if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
+	    prog->expected_attach_type == BPF_LSM_CGROUP)
+		return __bpf_prog_enter_lsm_cgroup;
+
+	return sleepable ? __bpf_prog_enter_sleepable : __bpf_prog_enter;
+}
+
+bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog)
+{
+	bool sleepable = prog->aux->sleepable;
+
+	if (bpf_prog_check_recur(prog))
+		return sleepable ? __bpf_prog_exit_sleepable_recur :
+			__bpf_prog_exit_recur;
+
+	if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
+	    prog->expected_attach_type == BPF_LSM_CGROUP)
+		return __bpf_prog_exit_lsm_cgroup;
+
+	return sleepable ? __bpf_prog_exit_sleepable : __bpf_prog_exit;
+}
+
 int __weak
 arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
 			    const struct btf_func_model *m, u32 flags,