bpf: extend is_branch_taken to registers

This patch extends is_branch_taken() logic from JMP+K instructions
to JMP+X instructions.
Conditional branches are often done when src and dst registers
contain known scalars. In such case the verifier can follow
the branch that is going to be taken when program executes.
That speeds up the verification and is an essential feature to support
bounded loops.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
This commit is contained in:
Alexei Starovoitov 2019-06-15 12:12:19 -07:00 committed by Daniel Borkmann
parent fc559a70d5
commit fb8d251ee2
1 changed file with 19 additions and 15 deletions

View File

@@ -5266,9 +5266,10 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	struct bpf_verifier_state *this_branch = env->cur_state;
 	struct bpf_verifier_state *other_branch;
 	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
-	struct bpf_reg_state *dst_reg, *other_branch_regs;
+	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
 	u8 opcode = BPF_OP(insn->code);
 	bool is_jmp32;
+	int pred = -1;
 	int err;

 	/* Only conditional jumps are expected to reach here. */
@@ -5293,6 +5294,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 				insn->src_reg);
 			return -EACCES;
 		}
+		src_reg = &regs[insn->src_reg];
 	} else {
 		if (insn->src_reg != BPF_REG_0) {
 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
@@ -5308,10 +5310,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	dst_reg = &regs[insn->dst_reg];

 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
-	if (BPF_SRC(insn->code) == BPF_K) {
-		int pred = is_branch_taken(dst_reg, insn->imm, opcode,
-					   is_jmp32);
+	if (BPF_SRC(insn->code) == BPF_K)
+		pred = is_branch_taken(dst_reg, insn->imm,
+				       opcode, is_jmp32);
+	else if (src_reg->type == SCALAR_VALUE &&
+		 tnum_is_const(src_reg->var_off))
+		pred = is_branch_taken(dst_reg, src_reg->var_off.value,
+				       opcode, is_jmp32);

 	if (pred == 1) {
 		/* only follow the goto, ignore fall-through */
 		*insn_idx += insn->off;
@@ -5322,7 +5327,6 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		 */
 		return 0;
 	}
-	}

 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
 				  false);