bpf: move fixup_bpf_calls() function
no functional change. move fixup_bpf_calls() to verifier.c; it is being
refactored in the next patch.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit e245c5c6a5
parent 4396e46187
kernel/bpf/syscall.c
@@ -586,59 +586,6 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl)
         list_add(&tl->list_node, &bpf_prog_types);
 }
 
-/* fixup insn->imm field of bpf_call instructions:
- * if (insn->imm == BPF_FUNC_map_lookup_elem)
- *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
- * else if (insn->imm == BPF_FUNC_map_update_elem)
- *      insn->imm = bpf_map_update_elem - __bpf_call_base;
- * else ...
- *
- * this function is called after eBPF program passed verification
- */
-static void fixup_bpf_calls(struct bpf_prog *prog)
-{
-        const struct bpf_func_proto *fn;
-        int i;
-
-        for (i = 0; i < prog->len; i++) {
-                struct bpf_insn *insn = &prog->insnsi[i];
-
-                if (insn->code == (BPF_JMP | BPF_CALL)) {
-                        /* we reach here when program has bpf_call instructions
-                         * and it passed bpf_check(), means that
-                         * ops->get_func_proto must have been supplied, check it
-                         */
-                        BUG_ON(!prog->aux->ops->get_func_proto);
-
-                        if (insn->imm == BPF_FUNC_get_route_realm)
-                                prog->dst_needed = 1;
-                        if (insn->imm == BPF_FUNC_get_prandom_u32)
-                                bpf_user_rnd_init_once();
-                        if (insn->imm == BPF_FUNC_xdp_adjust_head)
-                                prog->xdp_adjust_head = 1;
-                        if (insn->imm == BPF_FUNC_tail_call) {
-                                /* mark bpf_tail_call as different opcode
-                                 * to avoid conditional branch in
-                                 * interpeter for every normal call
-                                 * and to prevent accidental JITing by
-                                 * JIT compiler that doesn't support
-                                 * bpf_tail_call yet
-                                 */
-                                insn->imm = 0;
-                                insn->code |= BPF_X;
-                                continue;
-                        }
-
-                        fn = prog->aux->ops->get_func_proto(insn->imm);
-                        /* all functions that have prototype and verifier allowed
-                         * programs to call them, must be real in-kernel functions
-                         */
-                        BUG_ON(!fn->func);
-                        insn->imm = fn->func - __bpf_call_base;
-                }
-        }
-}
-
 /* drop refcnt on maps used by eBPF program and free auxilary data */
 static void free_used_maps(struct bpf_prog_aux *aux)
 {
@@ -892,9 +839,6 @@ static int bpf_prog_load(union bpf_attr *attr)
         if (err < 0)
                 goto free_used_maps;
 
-        /* fixup BPF_CALL->imm field */
-        fixup_bpf_calls(prog);
-
         /* eBPF program is ready to be JITed */
         prog = bpf_prog_select_runtime(prog, &err);
         if (err < 0)
kernel/bpf/verifier.c
@@ -3233,6 +3233,60 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
         return 0;
 }
 
+/* fixup insn->imm field of bpf_call instructions:
+ * if (insn->imm == BPF_FUNC_map_lookup_elem)
+ *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
+ * else if (insn->imm == BPF_FUNC_map_update_elem)
+ *      insn->imm = bpf_map_update_elem - __bpf_call_base;
+ * else ...
+ *
+ * this function is called after eBPF program passed verification
+ */
+static void fixup_bpf_calls(struct bpf_prog *prog)
+{
+        const struct bpf_func_proto *fn;
+        int i;
+
+        for (i = 0; i < prog->len; i++) {
+                struct bpf_insn *insn = &prog->insnsi[i];
+
+                if (insn->code == (BPF_JMP | BPF_CALL)) {
+                        /* we reach here when program has bpf_call instructions
+                         * and it passed bpf_check(), means that
+                         * ops->get_func_proto must have been supplied, check it
+                         */
+                        BUG_ON(!prog->aux->ops->get_func_proto);
+
+                        if (insn->imm == BPF_FUNC_get_route_realm)
+                                prog->dst_needed = 1;
+                        if (insn->imm == BPF_FUNC_get_prandom_u32)
+                                bpf_user_rnd_init_once();
+                        if (insn->imm == BPF_FUNC_xdp_adjust_head)
+                                prog->xdp_adjust_head = 1;
+                        if (insn->imm == BPF_FUNC_tail_call) {
+                                /* mark bpf_tail_call as different opcode
+                                 * to avoid conditional branch in
+                                 * interpeter for every normal call
+                                 * and to prevent accidental JITing by
+                                 * JIT compiler that doesn't support
+                                 * bpf_tail_call yet
+                                 */
+                                insn->imm = 0;
+                                insn->code |= BPF_X;
+                                continue;
+                        }
+
+                        fn = prog->aux->ops->get_func_proto(insn->imm);
+                        /* all functions that have prototype and verifier allowed
+                         * programs to call them, must be real in-kernel functions
+                         */
+                        BUG_ON(!fn->func);
+                        insn->imm = fn->func - __bpf_call_base;
+                }
+        }
+}
+
+
 static void free_states(struct bpf_verifier_env *env)
 {
         struct bpf_verifier_state_list *sl, *sln;
@@ -3328,6 +3382,9 @@ skip_full_check:
         /* program is valid, convert *(u32*)(ctx + off) accesses */
         ret = convert_ctx_accesses(env);
 
+        if (ret == 0)
+                fixup_bpf_calls(env->prog);
+
         if (log_level && log_len >= log_size - 1) {
                 BUG_ON(log_len >= log_size);
                 /* verifier log exceeded user supplied buffer */
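The function being moved is unchanged, and the rewrite it performs can be illustrated with a small stand-alone sketch that mirrors insn->imm = fn->func - __bpf_call_base from the diff above: a call instruction starts out carrying a helper ID in its imm field, and the fixup pass replaces that ID with the helper's address expressed as an offset from a common call base. Every toy_* name below is invented for illustration; only the base-relative encoding is taken from the code above.

/* toy_fixup.c -- user-space sketch of the imm rewrite done by fixup_bpf_calls() */
#include <stdint.h>
#include <stdio.h>

enum toy_helper_id { TOY_FUNC_map_lookup_elem = 1, TOY_FUNC_map_update_elem = 2 };

static uint64_t toy_call_base(uint64_t a)  { return a; }     /* plays __bpf_call_base */
static uint64_t toy_map_lookup(uint64_t a) { return a + 1; } /* plays bpf_map_lookup_elem */
static uint64_t toy_map_update(uint64_t a) { return a + 2; } /* plays bpf_map_update_elem */

struct toy_insn { int32_t imm; };

static void toy_fixup_call(struct toy_insn *insn)
{
        uint64_t (*fn)(uint64_t);

        if (insn->imm == TOY_FUNC_map_lookup_elem)
                fn = toy_map_lookup;
        else if (insn->imm == TOY_FUNC_map_update_elem)
                fn = toy_map_update;
        else
                return;                 /* unknown helper ID: leave untouched */

        /* store the helper as a 32-bit offset from the shared call base,
         * the same encoding as insn->imm = fn->func - __bpf_call_base
         */
        insn->imm = (int32_t)((intptr_t)fn - (intptr_t)toy_call_base);
}

int main(void)
{
        struct toy_insn insn = { .imm = TOY_FUNC_map_lookup_elem };
        uint64_t (*helper)(uint64_t);

        toy_fixup_call(&insn);

        /* the "interpreter" side resolves base + offset back to a function */
        helper = (uint64_t (*)(uint64_t))((intptr_t)toy_call_base + insn->imm);
        printf("helper(41) = %llu\n", (unsigned long long)helper(41)); /* prints 42 */
        return 0;
}

The base-relative encoding is used because the 32-bit imm field cannot hold a full 64-bit kernel address, so helpers are addressed relative to __bpf_call_base; moving the pass into the verifier changes where the rewrite is applied, not the encoding itself.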