bpf: Ensure kptr_struct_meta is non-NULL for collection insert and refcount_acquire
It's straightforward to prove that kptr_struct_meta must be non-NULL for
any valid call to these kfuncs (a program sketch illustrating the
conditions below follows the list):
* btf_parse_struct_metas in btf.c creates a btf_struct_meta for any
struct in user BTF with a special field (e.g. bpf_refcount,
{rb,list}_node). These are stored in that BTF's struct_meta_tab.
* __process_kf_arg_ptr_to_graph_node in verifier.c ensures that nodes
  have a {rb,list}_node field and that it's at the correct offset.
  Similarly, check_kfunc_args ensures that the bpf_refcount field exists
  on the node param to bpf_refcount_acquire.
* So a btf_struct_meta must have been created for the struct type of the
  node param to these kfuncs.
* That BTF and its struct_meta_tab are guaranteed to still be around.
  Any arbitrary {rb,list} node the BPF program interacts with either
  came from bpf_obj_new or a collection removal kfunc in the same
  program, in which case the BTF is associated with the program and
  still around; or came from bpf_kptr_xchg, in which case the BTF was
  associated with the map and is still around.
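To make those conditions concrete, here is a minimal sketch of a program
whose node type carries both special fields. It is modeled on the BPF
selftests and assumes their bpf_experimental.h helpers; node_data,
add_node, and the rbtree layout are illustrative, not part of this patch:

  // SPDX-License-Identifier: GPL-2.0
  /* Illustrative sketch only; assumes bpf_experimental.h from the BPF
   * selftests (bpf_obj_new, bpf_refcount_acquire, bpf_rbtree_add, ...).
   */
  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include "bpf_experimental.h"

  /* Both special fields below cause btf_parse_struct_metas() to create
   * a btf_struct_meta for node_data in this program's BTF.
   */
  struct node_data {
  	long key;
  	struct bpf_refcount ref;
  	struct bpf_rb_node rbnode;
  };

  #ifndef container_of
  #define container_of(ptr, type, member) \
  	((type *)((void *)(ptr) - __builtin_offsetof(type, member)))
  #endif
  /* mirrors private() from the selftests' bpf_misc.h */
  #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

  private(A) struct bpf_spin_lock glock;
  private(A) struct bpf_rb_root groot __contains(node_data, rbnode);

  static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
  {
  	struct node_data *na = container_of(a, struct node_data, rbnode);
  	struct node_data *nb = container_of(b, struct node_data, rbnode);

  	return na->key < nb->key;
  }

  SEC("tc")
  long add_node(void *ctx)
  {
  	struct node_data *n, *m;

  	n = bpf_obj_new(typeof(*n));	/* node's BTF is this program's BTF */
  	if (!n)
  		return 0;

  	m = bpf_refcount_acquire(n);	/* verifier checked the bpf_refcount field */
  	bpf_spin_lock(&glock);
  	bpf_rbtree_add(&groot, &n->rbnode, less); /* verifier checked bpf_rb_node */
  	bpf_spin_unlock(&glock);
  	if (m)
  		bpf_obj_drop(m);
  	return 0;
  }

  char _license[] SEC("license") = "GPL";

Any node reaching bpf_rbtree_add or bpf_refcount_acquire here therefore
has a struct type for which a btf_struct_meta was created.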
Instead of silently continuing with NULL struct_meta, which caused
confusing bugs such as those addressed by commit 2140a6e342 ("bpf: Set
kptr_struct_meta for node param to list and rbtree insert funcs"), let's
error out.
error out. Then, at runtime, we can confidently say that the
implementations of these kfuncs were given a non-NULL kptr_struct_meta,
meaning that special-field-specific functionality like
bpf_obj_free_fields and the bpf_obj_drop change introduced later in this
series are guaranteed to execute.
This patch doesn't change functionality, just makes it easier to reason
about existing functionality.
Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20230821193311.3290257-2-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit f0d991a070
parent 9e3b47abeb
@@ -18276,6 +18276,13 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
 
+		if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
+		    !kptr_struct_meta) {
+			verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n",
+				insn_idx);
+			return -EFAULT;
+		}
+
 		insn_buf[0] = addr[0];
 		insn_buf[1] = addr[1];
 		insn_buf[2] = *insn;
@@ -18283,6 +18290,7 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	} else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
 		   desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
 		   desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
 		int struct_meta_reg = BPF_REG_3;
 		int node_offset_reg = BPF_REG_4;
 
@@ -18292,6 +18300,12 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			node_offset_reg = BPF_REG_5;
 		}
 
+		if (!kptr_struct_meta) {
+			verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n",
+				insn_idx);
+			return -EFAULT;
+		}
+
 		__fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg,
 						node_offset_reg, insn, insn_buf, cnt);
 	} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
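At runtime, the guarantee established above lets the kfunc
implementations use their hidden struct_meta argument without a NULL
check. A condensed sketch of the consuming side, modeled on
bpf_refcount_acquire_impl() in kernel/bpf/helpers.c (not a verbatim
copy; surrounding code elided):

  /* Condensed sketch modeled on bpf_refcount_acquire_impl() in
   * kernel/bpf/helpers.c; not a verbatim copy.
   */
  __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
  {
  	/* meta__ign is invisible to BPF callers: fixup_kfunc_call()
  	 * rewrites the call to load kptr_struct_meta into BPF_REG_2, and
  	 * after this patch it is guaranteed to be non-NULL here.
  	 */
  	struct btf_struct_meta *meta = meta__ign;
  	struct bpf_refcount *ref;

  	ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
  	if (!refcount_inc_not_zero((refcount_t *)ref))
  		return NULL;
  	return p__refcounted_kptr;
  }

The collection insert kfuncs receive the same pointer via BPF_REG_3 or
BPF_REG_4 (see __fixup_collection_insert_kfunc), which is what allows
special-field handling such as bpf_obj_free_fields to run
unconditionally on these nodes.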