bpf: Special verifier handling for bpf_rbtree_{remove, first}
Newly-added bpf_rbtree_{remove,first} kfuncs have some special properties
that require handling in the verifier:

  * Both bpf_rbtree_remove and bpf_rbtree_first return the type containing
    the bpf_rb_node field, with the offset set to that field's offset,
    instead of a struct bpf_rb_node *.
    * The mark_reg_graph_node helper added in the previous patch
      generalizes this logic; use it.

  * bpf_rbtree_remove's node input is a node that's been inserted in the
    tree - a non-owning reference.

  * bpf_rbtree_remove must invalidate non-owning references in order to
    avoid an aliasing issue. Use the previously-added
    invalidate_non_owning_refs helper to mark this function as a
    non-owning ref invalidation point.

  * Unlike other functions, which convert one of their input arg regs to a
    non-owning reference, bpf_rbtree_first takes no arguments and just
    returns a non-owning reference (possibly null).
    * For now, verifier logic for this is special-cased instead of adding
      a new kfunc flag.

This patch, along with the previous one, completes special verifier
handling for all rbtree API functions added in this series.

With functional verifier handling of rbtree_remove, under the current
non-owning reference scheme, a node type with both bpf_{list,rb}_node
fields could cause the verifier to accept programs which remove such
nodes from collections they haven't been added to. In order to prevent
this, this patch adds a check to btf_parse_fields which rejects structs
with both bpf_{list,rb}_node fields. This is a temporary measure that can
be removed after the "collection identity" followup. See the comment
added in btf_parse_fields. A linked_list BTF test exercising the new
check is added in this patch as well.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Link: https://lore.kernel.org/r/20230214004017.2534011-6-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit a40d363243 (parent 5d92ddc3de)
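For context before the diffs, here is a sketch of how these kfuncs fit together from a BPF program's point of view. This fragment is not part of the patch; it is modeled loosely on the rbtree selftests elsewhere in this series. The names (node_data, groot, glock, less, add_then_remove) are hypothetical, and the private()/__contains() macros plus the bpf_obj_new/bpf_obj_drop/container_of and rbtree kfunc declarations are assumed to come from the selftests' bpf_experimental.h-style headers.

/* Hypothetical fragment, not part of this patch. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

char _license[] SEC("license") = "GPL";

struct node_data {
	long key;
	struct bpf_rb_node node;
};

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);

static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *node_a = container_of(a, struct node_data, node);
	struct node_data *node_b = container_of(b, struct node_data, node);

	return node_a->key < node_b->key;
}

SEC("tc")
long add_then_remove(void *ctx)
{
	struct bpf_rb_node *res;
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));		/* owning reference */
	if (!n)
		return 1;
	n->key = 42;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);	/* n becomes non-owning */
	/* a non-owning ref is a valid input here; the return value is an
	 * owning ref whose type/offset already describe the containing
	 * node_data rather than a bare bpf_rb_node
	 */
	res = bpf_rbtree_remove(&groot, &n->node);
	bpf_spin_unlock(&glock);

	if (!res)
		return 1;
	n = container_of(res, struct node_data, node);
	bpf_obj_drop(n);			/* release owning reference */
	return 0;
}

The two properties called out in the commit message are visible here: bpf_rbtree_add() downgrades n to a non-owning reference, and bpf_rbtree_remove() hands back an owning reference to the containing node_data.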
@@ -3768,6 +3768,30 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
 		goto end;
 	}
 
+	/* need collection identity for non-owning refs before allowing this
+	 *
+	 * Consider a node type w/ both list and rb_node fields:
+	 *   struct node {
+	 *     struct bpf_list_node l;
+	 *     struct bpf_rb_node r;
+	 *   }
+	 *
+	 * Used like so:
+	 *   struct node *n = bpf_obj_new(....);
+	 *   bpf_list_push_front(&list_head, &n->l);
+	 *   bpf_rbtree_remove(&rb_root, &n->r);
+	 *
+	 * It should not be possible to rbtree_remove the node since it hasn't
+	 * been added to a tree. But push_front converts n to a non-owning
+	 * reference, and rbtree_remove accepts the non-owning reference to
+	 * a type w/ bpf_rb_node field.
+	 */
+	if (btf_record_has_field(rec, BPF_LIST_NODE) &&
+	    btf_record_has_field(rec, BPF_RB_NODE)) {
+		ret = -EINVAL;
+		goto end;
+	}
+
 	return rec;
 end:
 	btf_record_free(rec);
@@ -9682,14 +9682,26 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
 				return ret;
 			break;
 		case KF_ARG_PTR_TO_RB_NODE:
-			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
-				verbose(env, "arg#%d expected pointer to allocated object\n", i);
-				return -EINVAL;
-			}
-			if (!reg->ref_obj_id) {
-				verbose(env, "allocated object must be referenced\n");
-				return -EINVAL;
+			if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) {
+				if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) {
+					verbose(env, "rbtree_remove node input must be non-owning ref\n");
+					return -EINVAL;
+				}
+				if (in_rbtree_lock_required_cb(env)) {
+					verbose(env, "rbtree_remove not allowed in rbtree cb\n");
+					return -EINVAL;
+				}
+			} else {
+				if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
+					verbose(env, "arg#%d expected pointer to allocated object\n", i);
+					return -EINVAL;
+				}
+				if (!reg->ref_obj_id) {
+					verbose(env, "allocated object must be referenced\n");
+					return -EINVAL;
+				}
 			}
 
 			ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta);
 			if (ret < 0)
 				return ret;
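Two hypothetical rejection cases, reusing the declarations assumed in the sketch above (compare the rbtree_fail selftests in this series); these fragments are not from the patch and are meant to fail verification, with the error strings matching the verbose() messages added in the hunk:

/* Hypothetical fragments that the new KF_ARG_PTR_TO_RB_NODE checks reject. */
SEC("tc")
long remove_unadded_node(void *ctx)
{
	struct node_data *n = bpf_obj_new(typeof(*n));

	if (!n)
		return 1;
	bpf_spin_lock(&glock);
	/* n is still an owning reference (never added to the tree), so this
	 * call is rejected: "rbtree_remove node input must be non-owning ref"
	 */
	bpf_rbtree_remove(&groot, &n->node);
	bpf_spin_unlock(&glock);
	bpf_obj_drop(n);
	return 0;
}

/* If this function is used as the less() callback for bpf_rbtree_add(),
 * in_rbtree_lock_required_cb() is true while verifying its body, so the
 * call below is rejected: "rbtree_remove not allowed in rbtree cb"
 */
static bool bad_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	bpf_rbtree_remove(&groot, a);
	return true;
}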
@@ -9940,11 +9952,12 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			   meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
 			struct btf_field *field = meta.arg_list_head.field;
 
-			mark_reg_known_zero(env, regs, BPF_REG_0);
-			regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
-			regs[BPF_REG_0].btf = field->graph_root.btf;
-			regs[BPF_REG_0].btf_id = field->graph_root.value_btf_id;
-			regs[BPF_REG_0].off = field->graph_root.node_offset;
+			mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
+		} else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
+			   meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
+			struct btf_field *field = meta.arg_rbtree_root.field;
+
+			mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
 		} else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
 			mark_reg_known_zero(env, regs, BPF_REG_0);
 			regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;
@@ -10010,7 +10023,13 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			if (is_kfunc_ret_null(&meta))
 				regs[BPF_REG_0].id = id;
 			regs[BPF_REG_0].ref_obj_id = id;
+		} else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
+			ref_set_non_owning(env, &regs[BPF_REG_0]);
 		}
 
+		if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove])
+			invalidate_non_owning_refs(env);
+
 		if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id)
 			regs[BPF_REG_0].id = ++env->id_gen;
 	} /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */
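A final hedged sketch of the return-value handling and invalidation added above: bpf_rbtree_first() yields a possibly-NULL non-owning reference, and a subsequent bpf_rbtree_remove() invalidates every outstanding non-owning reference. This fragment is not from the patch; it again reuses the hypothetical declarations assumed in the first sketch.

/* Hypothetical fragment illustrating rbtree_first's non-owning return value
 * and the invalidation performed by rbtree_remove.
 */
SEC("tc")
long first_then_remove(void *ctx)
{
	struct bpf_rb_node *first, *removed;
	struct node_data *o;

	bpf_spin_lock(&glock);
	first = bpf_rbtree_first(&groot);	/* non-owning ref, may be NULL */
	if (!first) {
		bpf_spin_unlock(&glock);
		return 1;
	}
	o = container_of(first, struct node_data, node);
	removed = bpf_rbtree_remove(&groot, &o->node);
	/* every non-owning ref (o, first) is invalidated by the remove above;
	 * dereferencing o here would now be rejected by the verifier
	 */
	bpf_spin_unlock(&glock);

	if (!removed)
		return 1;
	o = container_of(removed, struct node_data, node);
	bpf_obj_drop(o);	/* the removed node is an owning ref; release it */
	return 0;
}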
@@ -715,6 +715,43 @@ static void test_btf(void)
 		btf__free(btf);
 		break;
 	}
+
+	while (test__start_subtest("btf: list_node and rb_node in same struct")) {
+		btf = init_btf();
+		if (!ASSERT_OK_PTR(btf, "init_btf"))
+			break;
+
+		id = btf__add_struct(btf, "bpf_rb_node", 24);
+		if (!ASSERT_EQ(id, 5, "btf__add_struct bpf_rb_node"))
+			break;
+		id = btf__add_struct(btf, "bar", 40);
+		if (!ASSERT_EQ(id, 6, "btf__add_struct bar"))
+			break;
+		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
+		if (!ASSERT_OK(err, "btf__add_field bar::a"))
+			break;
+		err = btf__add_field(btf, "c", 5, 128, 0);
+		if (!ASSERT_OK(err, "btf__add_field bar::c"))
+			break;
+
+		id = btf__add_struct(btf, "foo", 20);
+		if (!ASSERT_EQ(id, 7, "btf__add_struct foo"))
+			break;
+		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
+		if (!ASSERT_OK(err, "btf__add_field foo::a"))
+			break;
+		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
+		if (!ASSERT_OK(err, "btf__add_field foo::b"))
+			break;
+		id = btf__add_decl_tag(btf, "contains:bar:a", 7, 0);
+		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:bar:a"))
+			break;
+
+		err = btf__load_into_kernel(btf);
+		ASSERT_EQ(err, -EINVAL, "check btf");
+		btf__free(btf);
+		break;
+	}
 }
 
 void test_linked_list(void)