bpf: clean up visit_insn()'s instruction processing
Instead of referencing the processed instruction repeatedly as insns[t] throughout the entire visit_insn() function, take a local insn pointer and work with it in a cleaner way. This also makes it a bit easier to enhance the function further.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20230302235015.2044271-7-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent fffc893b6b
commit 653ae3a874
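Before the diff, here is a minimal, self-contained sketch of the pattern the patch applies: replacing repeated insns[t] array indexing with a single local pointer taken once at the top of the function. The struct insn type, its fields, and the classify_* functions below are hypothetical stand-ins for illustration only, not the kernel's bpf_insn or verifier code.

/* Minimal sketch of the pointer refactor; hypothetical names, not kernel code. */
#include <stdio.h>

struct insn {
	unsigned char code;
	int off;
};

/* Before: every access re-indexes the array as insns[t]. */
static int classify_indexed(const struct insn *insns, int t)
{
	if (insns[t].code == 0)
		return insns[t].off;
	return -1;
}

/* After: take a local pointer once, then dereference it everywhere. */
static int classify_pointer(const struct insn *insns, int t)
{
	const struct insn *insn = &insns[t];

	if (insn->code == 0)
		return insn->off;
	return -1;
}

int main(void)
{
	struct insn prog[] = { { 0, 7 }, { 1, 3 } };

	/* Both variants compute the same result: 7 7 */
	printf("%d %d\n", classify_indexed(prog, 0), classify_pointer(prog, 0));
	return 0;
}

Since t is constant within the function, both forms typically compile to the same code; the win is readability, plus shorter lines when the patch later adds more accesses.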
@@ -13484,44 +13484,43 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
  */
 static int visit_insn(int t, struct bpf_verifier_env *env)
 {
-	struct bpf_insn *insns = env->prog->insnsi;
+	struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
 	int ret;
 
-	if (bpf_pseudo_func(insns + t))
+	if (bpf_pseudo_func(insn))
 		return visit_func_call_insn(t, insns, env, true);
 
 	/* All non-branch instructions have a single fall-through edge. */
-	if (BPF_CLASS(insns[t].code) != BPF_JMP &&
-	    BPF_CLASS(insns[t].code) != BPF_JMP32)
+	if (BPF_CLASS(insn->code) != BPF_JMP &&
+	    BPF_CLASS(insn->code) != BPF_JMP32)
 		return push_insn(t, t + 1, FALLTHROUGH, env, false);
 
-	switch (BPF_OP(insns[t].code)) {
+	switch (BPF_OP(insn->code)) {
 	case BPF_EXIT:
 		return DONE_EXPLORING;
 
 	case BPF_CALL:
-		if (insns[t].imm == BPF_FUNC_timer_set_callback)
+		if (insn->imm == BPF_FUNC_timer_set_callback)
 			/* Mark this call insn as a prune point to trigger
 			 * is_state_visited() check before call itself is
 			 * processed by __check_func_call(). Otherwise new
 			 * async state will be pushed for further exploration.
 			 */
 			mark_prune_point(env, t);
-		return visit_func_call_insn(t, insns, env,
-					    insns[t].src_reg == BPF_PSEUDO_CALL);
+		return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL);
 
 	case BPF_JA:
-		if (BPF_SRC(insns[t].code) != BPF_K)
+		if (BPF_SRC(insn->code) != BPF_K)
 			return -EINVAL;
 
 		/* unconditional jump with single edge */
-		ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
+		ret = push_insn(t, t + insn->off + 1, FALLTHROUGH, env,
 				true);
 		if (ret)
 			return ret;
 
-		mark_prune_point(env, t + insns[t].off + 1);
-		mark_jmp_point(env, t + insns[t].off + 1);
+		mark_prune_point(env, t + insn->off + 1);
+		mark_jmp_point(env, t + insn->off + 1);
 
 		return ret;
 
@@ -13533,7 +13532,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
 		if (ret)
 			return ret;
 
-		return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
+		return push_insn(t, t + insn->off + 1, BRANCH, env, true);
 	}
 }