diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 00a2f06e38fd..1b8d8cdd3575 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1070,8 +1070,8 @@ done:
 	return err;
 }
 
-#define BTF_DEDUP_TABLE_SIZE_LOG 14
-#define BTF_DEDUP_TABLE_MOD ((1 << BTF_DEDUP_TABLE_SIZE_LOG) - 1)
+#define BTF_DEDUP_TABLE_DEFAULT_SIZE (1 << 14)
+#define BTF_DEDUP_TABLE_MAX_SIZE_LOG 31
 #define BTF_UNPROCESSED_ID ((__u32)-1)
 #define BTF_IN_PROGRESS_ID ((__u32)-2)
 
@@ -1128,18 +1128,21 @@ static inline __u32 hash_combine(__u32 h, __u32 value)
 #undef GOLDEN_RATIO_PRIME
 }
 
-#define for_each_hash_node(table, hash, node) \
-	for (node = table[hash & BTF_DEDUP_TABLE_MOD]; node; node = node->next)
+#define for_each_dedup_cand(d, hash, node) \
+	for (node = d->dedup_table[hash & (d->opts.dedup_table_size - 1)];	\
+	     node;								\
+	     node = node->next)
 
 static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id)
 {
 	struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node));
+	int bucket = hash & (d->opts.dedup_table_size - 1);
 
 	if (!node)
 		return -ENOMEM;
 	node->type_id = type_id;
-	node->next = d->dedup_table[hash & BTF_DEDUP_TABLE_MOD];
-	d->dedup_table[hash & BTF_DEDUP_TABLE_MOD] = node;
+	node->next = d->dedup_table[bucket];
+	d->dedup_table[bucket] = node;
 	return 0;
 }
 
@@ -1177,7 +1180,7 @@ static void btf_dedup_table_free(struct btf_dedup *d)
 	if (!d->dedup_table)
 		return;
 
-	for (i = 0; i < (1 << BTF_DEDUP_TABLE_SIZE_LOG); i++) {
+	for (i = 0; i < d->opts.dedup_table_size; i++) {
 		while (d->dedup_table[i]) {
 			tmp = d->dedup_table[i];
 			d->dedup_table[i] = tmp->next;
@@ -1212,19 +1215,37 @@ static void btf_dedup_free(struct btf_dedup *d)
 	free(d);
 }
 
+/* Find closest power of two >= to size, capped at 2^max_size_log */
+static __u32 roundup_pow2_max(__u32 size, int max_size_log)
+{
+	int i;
+
+	for (i = 0; i < max_size_log && (1U << i) < size; i++)
+		;
+	return 1U << i;
+}
+
+
 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
 				       const struct btf_dedup_opts *opts)
 {
 	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
 	int i, err = 0;
+	__u32 sz;
 
 	if (!d)
 		return ERR_PTR(-ENOMEM);
 
+	d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
+	sz = opts && opts->dedup_table_size ? opts->dedup_table_size
+					    : BTF_DEDUP_TABLE_DEFAULT_SIZE;
+	sz = roundup_pow2_max(sz, BTF_DEDUP_TABLE_MAX_SIZE_LOG);
+	d->opts.dedup_table_size = sz;
+
 	d->btf = btf;
 	d->btf_ext = btf_ext;
 
-	d->dedup_table = calloc(1 << BTF_DEDUP_TABLE_SIZE_LOG,
+	d->dedup_table = calloc(d->opts.dedup_table_size,
 				sizeof(struct btf_dedup_node *));
 	if (!d->dedup_table) {
 		err = -ENOMEM;
@@ -1249,8 +1270,6 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
 	for (i = 0; i <= btf->nr_types; i++)
 		d->hypot_map[i] = BTF_UNPROCESSED_ID;
 
-	d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
-
 done:
 	if (err) {
 		btf_dedup_free(d);
@@ -1644,7 +1663,7 @@ static __u32 btf_hash_struct(struct btf_type *t)
  * IDs. This check is performed during type graph equivalence check and
  * referenced types equivalence is checked separately.
  */
-static bool btf_equal_struct(struct btf_type *t1, struct btf_type *t2)
+static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
 {
 	struct btf_member *m1, *m2;
 	__u16 vlen;
@@ -1824,7 +1843,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
 
 	case BTF_KIND_INT:
 		h = btf_hash_int(t);
-		for_each_hash_node(d->dedup_table, h, cand_node) {
+		for_each_dedup_cand(d, h, cand_node) {
 			cand = d->btf->types[cand_node->type_id];
 			if (btf_equal_int(t, cand)) {
 				new_id = cand_node->type_id;
@@ -1835,7 +1854,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
 
 	case BTF_KIND_ENUM:
 		h = btf_hash_enum(t);
-		for_each_hash_node(d->dedup_table, h, cand_node) {
+		for_each_dedup_cand(d, h, cand_node) {
 			cand = d->btf->types[cand_node->type_id];
 			if (btf_equal_enum(t, cand)) {
 				new_id = cand_node->type_id;
@@ -1846,7 +1865,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
 
 	case BTF_KIND_FWD:
 		h = btf_hash_common(t);
-		for_each_hash_node(d->dedup_table, h, cand_node) {
+		for_each_dedup_cand(d, h, cand_node) {
 			cand = d->btf->types[cand_node->type_id];
 			if (btf_equal_common(t, cand)) {
 				new_id = cand_node->type_id;
@@ -2105,7 +2124,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
 		struct btf_member *cand_m, *canon_m;
 		__u16 vlen;
 
-		if (!btf_equal_struct(cand_type, canon_type))
+		if (!btf_shallow_equal_struct(cand_type, canon_type))
 			return 0;
 		vlen = BTF_INFO_VLEN(cand_type->info);
 		cand_m = (struct btf_member *)(cand_type + 1);
@@ -2246,7 +2265,7 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
 {
 	struct btf_dedup_node *cand_node;
-	struct btf_type *t;
+	struct btf_type *cand_type, *t;
 	/* if we don't find equivalent type, then we are canonical */
 	__u32 new_id = type_id;
 	__u16 kind;
@@ -2263,9 +2282,23 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
 		return 0;
 
 	h = btf_hash_struct(t);
-	for_each_hash_node(d->dedup_table, h, cand_node) {
+	for_each_dedup_cand(d, h, cand_node) {
 		int eq;
 
+		/*
+		 * Even though btf_dedup_is_equiv() checks for
+		 * btf_shallow_equal_struct() internally when checking two
+		 * structs (unions) for equivalence, we need to guard here
+		 * from picking matching FWD type as a dedup candidate.
+		 * This can happen due to hash collision. In such case just
+		 * relying on btf_dedup_is_equiv() would lead to potentially
+		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
+		 * FWD and compatible STRUCT/UNION are considered equivalent.
+		 */
+		cand_type = d->btf->types[cand_node->type_id];
+		if (!btf_shallow_equal_struct(t, cand_type))
+			continue;
+
 		btf_dedup_clear_hypot_map(d);
 		eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id);
 		if (eq < 0)
@@ -2350,7 +2383,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
 		t->type = ref_type_id;
 
 		h = btf_hash_common(t);
-		for_each_hash_node(d->dedup_table, h, cand_node) {
+		for_each_dedup_cand(d, h, cand_node) {
 			cand = d->btf->types[cand_node->type_id];
 			if (btf_equal_common(t, cand)) {
 				new_id = cand_node->type_id;
@@ -2373,7 +2406,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
 		info->index_type = ref_type_id;
 
 		h = btf_hash_array(t);
-		for_each_hash_node(d->dedup_table, h, cand_node) {
+		for_each_dedup_cand(d, h, cand_node) {
 			cand = d->btf->types[cand_node->type_id];
 			if (btf_equal_array(t, cand)) {
 				new_id = cand_node->type_id;
@@ -2404,7 +2437,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
 		}
 
 		h = btf_hash_fnproto(t);
-		for_each_hash_node(d->dedup_table, h, cand_node) {
+		for_each_dedup_cand(d, h, cand_node) {
 			cand = d->btf->types[cand_node->type_id];
 			if (btf_equal_fnproto(t, cand)) {
 				new_id = cand_node->type_id;
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 94bbc249b0f1..28a1e1e59861 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -76,7 +76,7 @@ LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
 
 LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size);
 LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
-LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext* btf_ext,
+LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext,
 					     __u32 *size);
 LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf,
 					const struct btf_ext *btf_ext,
@@ -90,6 +90,7 @@ LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
 LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
 
 struct btf_dedup_opts {
+	unsigned int dedup_table_size;
 	bool dont_resolve_fwds;
 };
 
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index e47168d1257d..3b74d23fffab 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -14,6 +14,7 @@ feature
 test_libbpf_open
 test_sock
 test_sock_addr
+test_sock_fields
 urandom_read
 test_btf
 test_sockmap
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 02d314383a9c..38797aa627a7 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -5731,6 +5731,51 @@ const struct btf_dedup_test dedup_tests[] = {
 		.dont_resolve_fwds = false,
 	},
 },
+{
+	.descr = "dedup: struct <-> fwd resolution w/ hash collision",
+	/*
+	 * // CU 1:
+	 * struct x;
+	 * struct s {
+	 *	struct x *x;
+	 * };
+	 * // CU 2:
+	 * struct x {};
+	 * struct s {
+	 *	struct x *x;
+	 * };
+	 */
+	.input = {
+		.raw_types = {
+			/* CU 1 */
+			BTF_FWD_ENC(NAME_TBD, 0 /* struct fwd */),	/* [1] fwd x      */
+			BTF_PTR_ENC(1),					/* [2] ptr -> [1] */
+			BTF_STRUCT_ENC(NAME_TBD, 1, 8),			/* [3] struct s   */
+				BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+			/* CU 2 */
+			BTF_STRUCT_ENC(NAME_TBD, 0, 0),			/* [4] struct x   */
+			BTF_PTR_ENC(4),					/* [5] ptr -> [4] */
+			BTF_STRUCT_ENC(NAME_TBD, 1, 8),			/* [6] struct s   */
+				BTF_MEMBER_ENC(NAME_TBD, 5, 0),
+			BTF_END_RAW,
+		},
+		BTF_STR_SEC("\0x\0s\0x\0x\0s\0x\0"),
+	},
+	.expect = {
+		.raw_types = {
+			BTF_PTR_ENC(3),					/* [1] ptr -> [3] */
+			BTF_STRUCT_ENC(NAME_TBD, 1, 8),			/* [2] struct s   */
+				BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+			BTF_STRUCT_ENC(NAME_NTH(2), 0, 0),		/* [3] struct x   */
+			BTF_END_RAW,
+		},
+		BTF_STR_SEC("\0s\0x"),
+	},
+	.opts = {
+		.dont_resolve_fwds = false,
+		.dedup_table_size = 1, /* force hash collisions */
+	},
+},
 {
 	.descr = "dedup: all possible kinds (no duplicates)",
 	.input = {
@@ -5936,9 +5981,9 @@ static int do_test_dedup(unsigned int test_num)
 	}
 
 	test_hdr = test_btf_data;
-	test_strs = test_btf_data + test_hdr->str_off;
+	test_strs = test_btf_data + sizeof(*test_hdr) + test_hdr->str_off;
 	expect_hdr = expect_btf_data;
-	expect_strs = expect_btf_data + expect_hdr->str_off;
+	expect_strs = expect_btf_data + sizeof(*test_hdr) + expect_hdr->str_off;
 	if (CHECK(test_hdr->str_len != expect_hdr->str_len,
 		  "test_hdr->str_len:%u != expect_hdr->str_len:%u",
 		  test_hdr->str_len, expect_hdr->str_len)) {
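
Note (not part of the patch): below is a minimal caller-side sketch of how the new dedup_table_size knob might be used, assuming the btf__dedup(btf, btf_ext, opts) entry point that libbpf exposes alongside struct btf_dedup_opts; the helper name is hypothetical. A value of 0 keeps the default table of (1 << 14) buckets; any other value is rounded up to the next power of two by roundup_pow2_max(), capped at 2^31, so a request of 1000 would become 1024. The selftest above passes 1 to squeeze every type into a single bucket and force hash collisions.

#include <bpf/btf.h>	/* assumes an installed libbpf; in-tree users include "btf.h" */

/* Hypothetical helper, for illustration only. */
static int dedup_with_small_table(struct btf *btf, struct btf_ext *btf_ext)
{
	struct btf_dedup_opts opts = {
		/* 1 is already a power of two, so the dedup hash table ends
		 * up with a single bucket and every type lands on the same
		 * chain -- handy for stress-testing collision handling, as
		 * the new selftest does.
		 */
		.dedup_table_size = 1,
		.dont_resolve_fwds = false,
	};

	return btf__dedup(btf, btf_ext, &opts);
}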