nfp: bpf: support optimizing dead branches
The verifier will now optimize out branches to dead code; implement the
replace_insn callback to take advantage of that optimization.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit a32014b351 (parent e2fc61146a)
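Background for the diff below: when the core verifier proves that one leg of a conditional jump can never be taken, it hard wires the branch and notifies the offload driver through the new replace_insn hook, passing the unconditional jump that will replace the conditional one. The following is a minimal sketch of the shape of that replacement instruction; it is an illustration only (the helper name hard_wired_branch and the jump_leg_dead flag are made up for this sketch), not code from this patch or from the core verifier.

#include <linux/filter.h>	/* struct bpf_insn, BPF_JMP_IMM() */

/*
 * Illustrative sketch only: build the kind of instruction the verifier
 * hands to ->replace_insn() when it hard wires a dead branch.  "orig"
 * is the conditional jump being replaced; "jump_leg_dead" says which
 * leg the verifier proved unreachable.
 */
static struct bpf_insn hard_wired_branch(const struct bpf_insn *orig,
					 bool jump_leg_dead)
{
	/* Unconditional jump: BPF_JMP | BPF_JA | BPF_K, off = 0. */
	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);

	/*
	 * Jump leg dead     -> always fall through, keep off = 0.
	 * Fall-through dead -> always jump, reuse the original offset.
	 */
	if (!jump_leg_dead)
		ja.off = orig->off;

	return ja;
}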
@@ -412,6 +412,17 @@ static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
 	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
 }
 
+static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta)
+{
+	u8 op;
+
+	if (BPF_CLASS(meta->insn.code) != BPF_JMP)
+		return false;
+
+	op = BPF_OP(meta->insn.code);
+	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
+}
+
 static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
 {
 	struct bpf_insn insn = meta->insn;
@@ -520,6 +531,9 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
 		    int prev_insn_idx);
 int nfp_bpf_finalize(struct bpf_verifier_env *env);
 
+int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
+			     struct bpf_insn *insn);
+
 extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;
 
 struct netdev_bpf;
@@ -592,6 +592,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
 const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
 	.insn_hook	= nfp_verify_insn,
 	.finalize	= nfp_bpf_finalize,
+	.replace_insn	= nfp_bpf_opt_replace_insn,
 	.prepare	= nfp_bpf_verifier_prep,
 	.translate	= nfp_bpf_translate,
 	.destroy	= nfp_bpf_destroy,
@@ -786,3 +786,37 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env)
 
 	return 0;
 }
+
+int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
+			     struct bpf_insn *insn)
+{
+	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
+	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
+
+	meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);
+	nfp_prog->verifier_meta = meta;
+
+	/* conditional jump to jump conversion */
+	if (is_mbpf_cond_jump(meta) &&
+	    insn->code == (BPF_JMP | BPF_JA | BPF_K)) {
+		unsigned int tgt_off;
+
+		tgt_off = off + insn->off + 1;
+
+		if (!insn->off) {
+			meta->jmp_dst = list_next_entry(meta, l);
+			meta->jump_neg_op = false;
+		} else if (meta->jmp_dst->n != aux_data[tgt_off].orig_idx) {
+			pr_vlog(env, "branch hard wire at %d changes target %d -> %d\n",
+				off, meta->jmp_dst->n,
+				aux_data[tgt_off].orig_idx);
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	pr_vlog(env, "unsupported instruction replacement %hhx -> %hhx\n",
+		meta->insn.code, insn->code);
+	return -EINVAL;
+}
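A note on the two cases accepted by nfp_bpf_opt_replace_insn() above: when the incoming jump has off == 0 the verifier decided the branch always falls through, so the recorded jump destination is simply re-pointed at the next instruction and the negated-compare flag is cleared; when off is non-zero the branch always jumps, and the driver only checks that the new target (tgt_off = off + insn->off + 1, e.g. a "goto +3" at index 5 lands on index 9) still resolves, via aux_data[].orig_idx, to the instruction it had already recorded as jmp_dst. Any other replacement, or a target mismatch, is rejected with -EINVAL.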