Merge branch 'libbpf 1.0: deprecate bpf_map__def() API'
Christy Lee says:

====================

bpf_map__def() is rarely used and non-extensible. bpf_map_def fields
can be accessed with appropriate map getters and setters instead.
Deprecate the bpf_map__def() API and replace its use cases with getters
and setters.

Changelog:
----------
v1 -> v2:
https://lore.kernel.org/all/20220105230057.853163-1-christylee@fb.com/
* Fixed commit messages to match commit titles
* Fixed indentation
* Removed bpf_map__def() usage that was missed in v1
====================

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
commit 86c7ecad3b
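The migration this series performs is mechanical: each field read through the returned struct bpf_map_def becomes one getter call. A minimal before/after sketch (illustrative, assuming a struct bpf_map *map obtained from an opened object):

	/* Before: read fields through the deprecated definition struct. */
	const struct bpf_map_def *def = bpf_map__def(map);
	unsigned int nr_entries = def->max_entries;
	__u32 value_size = def->value_size;

	/* After: per-field getters, which stay source-compatible as
	 * libbpf's internal map representation evolves. */
	unsigned int nr_entries = bpf_map__max_entries(map);
	__u32 value_size = bpf_map__value_size(map);

The same substitution applies to key_size (bpf_map__key_size()), type (bpf_map__type()) and map_flags (bpf_map__map_flags()), all visible in the hunks below.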
@@ -209,7 +209,7 @@ static struct datarec *alloc_record_per_cpu(void)
 
 static struct record *alloc_record_per_rxq(void)
 {
-	unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
+	unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
 	struct record *array;
 
 	array = calloc(nr_rxqs, sizeof(struct record));
@@ -222,7 +222,7 @@ static struct record *alloc_record_per_rxq(void)
 
 static struct stats_record *alloc_stats_record(void)
 {
-	unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
+	unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
 	struct stats_record *rec;
 	int i;
 
@@ -241,7 +241,7 @@ static struct stats_record *alloc_stats_record(void)
 
 static void free_stats_record(struct stats_record *r)
 {
-	unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
+	unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
 	int i;
 
 	for (i = 0; i < nr_rxqs; i++)
@@ -289,7 +289,7 @@ static void stats_collect(struct stats_record *rec)
 	map_collect_percpu(fd, 0, &rec->stats);
 
 	fd = bpf_map__fd(rx_queue_index_map);
-	max_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
+	max_rxqs = bpf_map__max_entries(rx_queue_index_map);
 	for (i = 0; i < max_rxqs; i++)
 		map_collect_percpu(fd, i, &rec->rxq[i]);
 }
@@ -335,7 +335,7 @@ static void stats_print(struct stats_record *stats_rec,
 			struct stats_record *stats_prev,
 			int action, __u32 cfg_opt)
 {
-	unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
+	unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
 	unsigned int nr_cpus = bpf_num_possible_cpus();
 	double pps = 0, err = 0;
 	struct record *rec, *prev;

@@ -227,7 +227,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
 		/* only generate definitions for memory-mapped internal maps */
 		if (!bpf_map__is_internal(map))
 			continue;
-		if (!(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
+		if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
 			continue;
 
 		if (!get_map_ident(map, map_ident, sizeof(map_ident)))
@@ -468,7 +468,7 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
 		if (!get_map_ident(map, ident, sizeof(ident)))
 			continue;
 		if (bpf_map__is_internal(map) &&
-		    (bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
+		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
 			printf("\tmunmap(skel->%1$s, %2$zd);\n",
 			       ident, bpf_map_mmap_sz(map));
 	codegen("\
@@ -536,7 +536,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
 			continue;
 
 		if (!bpf_map__is_internal(map) ||
-		    !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
+		    !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
 			continue;
 
 		codegen("\
@@ -600,10 +600,10 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
 			continue;
 
 		if (!bpf_map__is_internal(map) ||
-		    !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
+		    !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
 			continue;
 
-		if (bpf_map__def(map)->map_flags & BPF_F_RDONLY_PROG)
+		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
 			mmap_flags = "PROT_READ";
 		else
 			mmap_flags = "PROT_READ | PROT_WRITE";
@@ -961,7 +961,7 @@ static int do_skeleton(int argc, char **argv)
 			i, bpf_map__name(map), i, ident);
 		/* memory-mapped internal maps */
 		if (bpf_map__is_internal(map) &&
-		    (bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) {
+		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE)) {
			printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
 			       i, ident);
 		}

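The flag logic in gen_trace() deserves a note: internal maps flagged BPF_F_MMAPABLE back global data sections, and the ones additionally flagged BPF_F_RDONLY_PROG (e.g. .rodata) are frozen after load, so the generated skeleton remaps them read-only. A sketch of that decision using only the new getter (choose_mmap_prot() is a hypothetical helper, not bpftool code):

	static const char *choose_mmap_prot(const struct bpf_map *map)
	{
		/* Maps read-only for the program side are frozen after
		 * load, so userspace remaps them PROT_READ as well. */
		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
			return "PROT_READ";
		return "PROT_READ | PROT_WRITE";
	}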
@@ -480,7 +480,6 @@ static int do_unregister(int argc, char **argv)
 static int do_register(int argc, char **argv)
 {
 	LIBBPF_OPTS(bpf_object_open_opts, open_opts);
-	const struct bpf_map_def *def;
 	struct bpf_map_info info = {};
 	__u32 info_len = sizeof(info);
 	int nr_errs = 0, nr_maps = 0;
@@ -510,8 +509,7 @@ static int do_register(int argc, char **argv)
 	}
 
 	bpf_object__for_each_map(map, obj) {
-		def = bpf_map__def(map);
-		if (def->type != BPF_MAP_TYPE_STRUCT_OPS)
+		if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
 			continue;
 
 		link = bpf_map__attach_struct_ops(map);

@@ -706,7 +706,8 @@ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
 LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
 LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
 /* get map definition */
-LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map);
+LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 8, "use appropriate getters or setters instead")
+const struct bpf_map_def *bpf_map__def(const struct bpf_map *map);
 /* get map name */
 LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
 /* get/set map type */

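With the LIBBPF_DEPRECATED_SINCE(0, 8, ...) annotation, callers targeting libbpf v0.8+ get a build-time diagnostic while the symbol keeps working until its removal in libbpf 1.0. Roughly what a caller now sees (exact wording varies by compiler):

	const struct bpf_map_def *def = bpf_map__def(map);
	/* warning: 'bpf_map__def' is deprecated: use appropriate getters
	 * or setters instead [-Wdeprecated-declarations] */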
@@ -1002,24 +1002,22 @@ __bpf_map__config_value(struct bpf_map *map,
 {
 	struct bpf_map_op *op;
 	const char *map_name = bpf_map__name(map);
-	const struct bpf_map_def *def = bpf_map__def(map);
 
-	if (IS_ERR(def)) {
-		pr_debug("Unable to get map definition from '%s'\n",
-			 map_name);
+	if (!map) {
+		pr_debug("Map '%s' is invalid\n", map_name);
 		return -BPF_LOADER_ERRNO__INTERNAL;
 	}
 
-	if (def->type != BPF_MAP_TYPE_ARRAY) {
+	if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
 		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
 			 map_name);
 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
 	}
-	if (def->key_size < sizeof(unsigned int)) {
+	if (bpf_map__key_size(map) < sizeof(unsigned int)) {
 		pr_debug("Map %s has incorrect key size\n", map_name);
 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
 	}
-	switch (def->value_size) {
+	switch (bpf_map__value_size(map)) {
 	case 1:
 	case 2:
 	case 4:
@@ -1061,7 +1059,6 @@ __bpf_map__config_event(struct bpf_map *map,
 			struct parse_events_term *term,
 			struct evlist *evlist)
 {
-	const struct bpf_map_def *def;
 	struct bpf_map_op *op;
 	const char *map_name = bpf_map__name(map);
 	struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);
@@ -1072,18 +1069,16 @@ __bpf_map__config_event(struct bpf_map *map,
 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
 	}
 
-	def = bpf_map__def(map);
-	if (IS_ERR(def)) {
-		pr_debug("Unable to get map definition from '%s'\n",
-			 map_name);
-		return PTR_ERR(def);
+	if (!map) {
+		pr_debug("Map '%s' is invalid\n", map_name);
+		return PTR_ERR(map);
 	}
 
 	/*
 	 * No need to check key_size and value_size:
 	 * kernel has already checked them.
 	 */
-	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
+	if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
 		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
 			 map_name);
 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
@@ -1132,7 +1127,6 @@ config_map_indices_range_check(struct parse_events_term *term,
 			       const char *map_name)
 {
 	struct parse_events_array *array = &term->array;
-	const struct bpf_map_def *def;
 	unsigned int i;
 
 	if (!array->nr_ranges)
@@ -1143,10 +1137,8 @@ config_map_indices_range_check(struct parse_events_term *term,
 		return -BPF_LOADER_ERRNO__INTERNAL;
 	}
 
-	def = bpf_map__def(map);
-	if (IS_ERR(def)) {
-		pr_debug("ERROR: Unable to get map definition from '%s'\n",
-			 map_name);
+	if (!map) {
+		pr_debug("Map '%s' is invalid\n", map_name);
 		return -BPF_LOADER_ERRNO__INTERNAL;
 	}
 
@@ -1155,7 +1147,7 @@ config_map_indices_range_check(struct parse_events_term *term,
 		size_t length = array->ranges[i].length;
 		unsigned int idx = start + length - 1;
 
-		if (idx >= def->max_entries) {
+		if (idx >= bpf_map__max_entries(map)) {
 			pr_debug("ERROR: index %d too large\n", idx);
 			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
 		}
@@ -1248,21 +1240,21 @@ out:
 }
 
 typedef int (*map_config_func_t)(const char *name, int map_fd,
-				 const struct bpf_map_def *pdef,
+				 const struct bpf_map *map,
 				 struct bpf_map_op *op,
 				 void *pkey, void *arg);
 
 static int
 foreach_key_array_all(map_config_func_t func,
 		      void *arg, const char *name,
-		      int map_fd, const struct bpf_map_def *pdef,
+		      int map_fd, const struct bpf_map *map,
 		      struct bpf_map_op *op)
 {
 	unsigned int i;
 	int err;
 
-	for (i = 0; i < pdef->max_entries; i++) {
-		err = func(name, map_fd, pdef, op, &i, arg);
+	for (i = 0; i < bpf_map__max_entries(map); i++) {
+		err = func(name, map_fd, map, op, &i, arg);
 		if (err) {
 			pr_debug("ERROR: failed to insert value to %s[%u]\n",
 				 name, i);
@@ -1275,7 +1267,7 @@ foreach_key_array_all(map_config_func_t func,
 static int
 foreach_key_array_ranges(map_config_func_t func, void *arg,
 			 const char *name, int map_fd,
-			 const struct bpf_map_def *pdef,
+			 const struct bpf_map *map,
 			 struct bpf_map_op *op)
 {
 	unsigned int i, j;
@@ -1288,7 +1280,7 @@ foreach_key_array_ranges(map_config_func_t func, void *arg,
 		for (j = 0; j < length; j++) {
 			unsigned int idx = start + j;
 
-			err = func(name, map_fd, pdef, op, &idx, arg);
+			err = func(name, map_fd, map, op, &idx, arg);
 			if (err) {
 				pr_debug("ERROR: failed to insert value to %s[%u]\n",
 					 name, idx);
@@ -1304,9 +1296,8 @@ bpf_map_config_foreach_key(struct bpf_map *map,
 			   map_config_func_t func,
 			   void *arg)
 {
-	int err, map_fd;
+	int err, map_fd, type;
 	struct bpf_map_op *op;
-	const struct bpf_map_def *def;
 	const char *name = bpf_map__name(map);
 	struct bpf_map_priv *priv = bpf_map__priv(map);
 
@@ -1319,9 +1310,8 @@ bpf_map_config_foreach_key(struct bpf_map *map,
 		return 0;
 	}
 
-	def = bpf_map__def(map);
-	if (IS_ERR(def)) {
-		pr_debug("ERROR: failed to get definition from map %s\n", name);
+	if (!map) {
+		pr_debug("Map '%s' is invalid\n", name);
 		return -BPF_LOADER_ERRNO__INTERNAL;
 	}
 	map_fd = bpf_map__fd(map);
@@ -1330,19 +1320,19 @@ bpf_map_config_foreach_key(struct bpf_map *map,
 		return map_fd;
 	}
 
+	type = bpf_map__type(map);
 	list_for_each_entry(op, &priv->ops_list, list) {
-		switch (def->type) {
+		switch (type) {
 		case BPF_MAP_TYPE_ARRAY:
 		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
 			switch (op->key_type) {
 			case BPF_MAP_KEY_ALL:
 				err = foreach_key_array_all(func, arg, name,
-							    map_fd, def, op);
+							    map_fd, map, op);
 				break;
 			case BPF_MAP_KEY_RANGES:
 				err = foreach_key_array_ranges(func, arg, name,
-							       map_fd, def,
-							       op);
+							       map_fd, map, op);
 				break;
 			default:
 				pr_debug("ERROR: keytype for map '%s' invalid\n",
@@ -1451,7 +1441,7 @@ apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
 
 static int
 apply_obj_config_map_for_key(const char *name, int map_fd,
-			     const struct bpf_map_def *pdef,
+			     const struct bpf_map *map,
 			     struct bpf_map_op *op,
 			     void *pkey, void *arg __maybe_unused)
 {
@@ -1460,7 +1450,7 @@ apply_obj_config_map_for_key(const char *name, int map_fd,
 	switch (op->op_type) {
 	case BPF_MAP_OP_SET_VALUE:
 		err = apply_config_value_for_key(map_fd, pkey,
-						 pdef->value_size,
+						 bpf_map__value_size(map),
 						 op->v.value);
 		break;
 	case BPF_MAP_OP_SET_EVSEL:

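The bpf-loader.c changes follow two repeating patterns: IS_ERR() checks on the cached definition become NULL checks on the map itself (the new getters are plain accessors and cannot fail), and callbacks that took a const struct bpf_map_def * now take the const struct bpf_map * and query fields on demand. A condensed sketch of the callback pattern (hypothetical names, not the perf API):

	typedef int (*map_cb_t)(int map_fd, const struct bpf_map *map,
				void *pkey);

	static int visit_all_keys(int map_fd, const struct bpf_map *map,
				  map_cb_t cb)
	{
		unsigned int i;
		int err;

		/* The loop bound comes from the getter; the callback can
		 * fetch key/value sizes the same way, so no definition
		 * struct needs to be threaded through. */
		for (i = 0; i < bpf_map__max_entries(map); i++) {
			err = cb(map_fd, map, &i);
			if (err)
				return err;
		}
		return 0;
	}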
@@ -9,25 +9,25 @@
 #include <stdlib.h>
 #include <unistd.h>
 
-static bool bpf_map_def__is_per_cpu(const struct bpf_map_def *def)
+static bool bpf_map__is_per_cpu(enum bpf_map_type type)
 {
-	return def->type == BPF_MAP_TYPE_PERCPU_HASH ||
-	       def->type == BPF_MAP_TYPE_PERCPU_ARRAY ||
-	       def->type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
-	       def->type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
+	return type == BPF_MAP_TYPE_PERCPU_HASH ||
+	       type == BPF_MAP_TYPE_PERCPU_ARRAY ||
+	       type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
+	       type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
 }
 
-static void *bpf_map_def__alloc_value(const struct bpf_map_def *def)
+static void *bpf_map__alloc_value(const struct bpf_map *map)
 {
-	if (bpf_map_def__is_per_cpu(def))
-		return malloc(round_up(def->value_size, 8) * sysconf(_SC_NPROCESSORS_CONF));
+	if (bpf_map__is_per_cpu(bpf_map__type(map)))
+		return malloc(round_up(bpf_map__value_size(map), 8) *
+			      sysconf(_SC_NPROCESSORS_CONF));
 
-	return malloc(def->value_size);
+	return malloc(bpf_map__value_size(map));
 }
 
 int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
 {
-	const struct bpf_map_def *def = bpf_map__def(map);
 	void *prev_key = NULL, *key, *value;
 	int fd = bpf_map__fd(map), err;
 	int printed = 0;
@@ -35,15 +35,15 @@ int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
 	if (fd < 0)
 		return fd;
 
-	if (IS_ERR(def))
-		return PTR_ERR(def);
+	if (!map)
+		return PTR_ERR(map);
 
 	err = -ENOMEM;
-	key = malloc(def->key_size);
+	key = malloc(bpf_map__key_size(map));
 	if (key == NULL)
 		goto out;
 
-	value = bpf_map_def__alloc_value(def);
+	value = bpf_map__alloc_value(map);
 	if (value == NULL)
 		goto out_free_key;
 

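The per-CPU sizing above mirrors the kernel's lookup ABI: per-CPU maps return one value slot per possible CPU, each rounded up to 8 bytes. A standalone sketch of the same computation (using libbpf's libbpf_num_possible_cpus() where the perf code uses sysconf(_SC_NPROCESSORS_CONF)):

	#include <stdlib.h>
	#include <bpf/libbpf.h>

	/* Size a lookup buffer for a possibly per-CPU map. */
	static void *alloc_map_value(const struct bpf_map *map)
	{
		size_t vsize = bpf_map__value_size(map);
		size_t slot = (vsize + 7) & ~(size_t)7; /* round_up(vsize, 8) */

		switch (bpf_map__type(map)) {
		case BPF_MAP_TYPE_PERCPU_HASH:
		case BPF_MAP_TYPE_PERCPU_ARRAY:
		case BPF_MAP_TYPE_LRU_PERCPU_HASH:
		case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
			return malloc(slot * libbpf_num_possible_cpus());
		default:
			return malloc(vsize);
		}
	}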
@@ -457,7 +457,7 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
 	if (map_fd < 0)
 		return -1;
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);
 
 		prog = bpf_object__find_program_by_name(obj, prog_name);

@@ -121,7 +121,7 @@ static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration)
 	if (CHECK_FAIL(map_fd < 0))
 		return;
 
-	buff = malloc(bpf_map__def(map)->value_size);
+	buff = malloc(bpf_map__value_size(map));
 	if (buff)
 		err = bpf_map_update_elem(map_fd, &zero, buff, 0);
 	free(buff);

@@ -20,7 +20,7 @@ void test_global_data_init(void)
 	if (CHECK_FAIL(!map || !bpf_map__is_internal(map)))
 		goto out;
 
-	sz = bpf_map__def(map)->value_size;
+	sz = bpf_map__value_size(map);
 	newval = malloc(sz);
 	if (CHECK_FAIL(!newval))
 		goto out;

@@ -1413,14 +1413,12 @@ close_srv1:
 
 static void test_ops_cleanup(const struct bpf_map *map)
 {
-	const struct bpf_map_def *def;
 	int err, mapfd;
 	u32 key;
 
-	def = bpf_map__def(map);
 	mapfd = bpf_map__fd(map);
 
-	for (key = 0; key < def->max_entries; key++) {
+	for (key = 0; key < bpf_map__max_entries(map); key++) {
 		err = bpf_map_delete_elem(mapfd, &key);
 		if (err && errno != EINVAL && errno != ENOENT)
 			FAIL_ERRNO("map_delete: expected EINVAL/ENOENT");
@@ -1443,13 +1441,13 @@ static const char *family_str(sa_family_t family)
 
 static const char *map_type_str(const struct bpf_map *map)
 {
-	const struct bpf_map_def *def;
+	int type;
 
-	def = bpf_map__def(map);
-	if (IS_ERR(def))
+	if (!map)
 		return "invalid";
+	type = bpf_map__type(map);
 
-	switch (def->type) {
+	switch (type) {
 	case BPF_MAP_TYPE_SOCKMAP:
 		return "sockmap";
 	case BPF_MAP_TYPE_SOCKHASH:

@@ -37,7 +37,7 @@ static void test_tailcall_1(void)
 	if (CHECK_FAIL(map_fd < 0))
 		goto out;
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
 		prog = bpf_object__find_program_by_name(obj, prog_name);
@@ -53,7 +53,7 @@ static void test_tailcall_1(void)
 			goto out;
 	}
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
 					&duration, &retval, NULL);
 		CHECK(err || retval != i, "tailcall",
@@ -69,7 +69,7 @@ static void test_tailcall_1(void)
 	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
 	      err, errno, retval);
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
 		prog = bpf_object__find_program_by_name(obj, prog_name);
@@ -90,8 +90,8 @@ static void test_tailcall_1(void)
 	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
 	      err, errno, retval);
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
-		j = bpf_map__def(prog_array)->max_entries - 1 - i;
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+		j = bpf_map__max_entries(prog_array) - 1 - i;
 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);
 
 		prog = bpf_object__find_program_by_name(obj, prog_name);
@@ -107,8 +107,8 @@ static void test_tailcall_1(void)
 			goto out;
 	}
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
-		j = bpf_map__def(prog_array)->max_entries - 1 - i;
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
+		j = bpf_map__max_entries(prog_array) - 1 - i;
 
 		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
 					&duration, &retval, NULL);
@@ -125,7 +125,7 @@ static void test_tailcall_1(void)
 	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
 	      err, errno, retval);
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		err = bpf_map_delete_elem(map_fd, &i);
 		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
 			goto out;
@@ -175,7 +175,7 @@ static void test_tailcall_2(void)
 	if (CHECK_FAIL(map_fd < 0))
 		goto out;
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
 		prog = bpf_object__find_program_by_name(obj, prog_name);
@@ -353,7 +353,7 @@ static void test_tailcall_4(void)
 	if (CHECK_FAIL(map_fd < 0))
 		return;
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
 		prog = bpf_object__find_program_by_name(obj, prog_name);
@@ -369,7 +369,7 @@ static void test_tailcall_4(void)
 			goto out;
 	}
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
 		if (CHECK_FAIL(err))
 			goto out;
@@ -380,7 +380,7 @@ static void test_tailcall_4(void)
 		      "err %d errno %d retval %d\n", err, errno, retval);
 	}
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
 		if (CHECK_FAIL(err))
 			goto out;
@@ -441,7 +441,7 @@ static void test_tailcall_5(void)
 	if (CHECK_FAIL(map_fd < 0))
 		return;
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
 		prog = bpf_object__find_program_by_name(obj, prog_name);
@@ -457,7 +457,7 @@ static void test_tailcall_5(void)
 			goto out;
 	}
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
 		if (CHECK_FAIL(err))
 			goto out;
@@ -468,7 +468,7 @@ static void test_tailcall_5(void)
 		      "err %d errno %d retval %d\n", err, errno, retval);
 	}
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
 		if (CHECK_FAIL(err))
 			goto out;
@@ -520,7 +520,7 @@ static void test_tailcall_bpf2bpf_1(void)
 		goto out;
 
 	/* nop -> jmp */
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
 		prog = bpf_object__find_program_by_name(obj, prog_name);
@@ -681,7 +681,7 @@ static void test_tailcall_bpf2bpf_3(void)
 	if (CHECK_FAIL(map_fd < 0))
 		goto out;
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
 		prog = bpf_object__find_program_by_name(obj, prog_name);
@@ -778,7 +778,7 @@ static void test_tailcall_bpf2bpf_4(bool noise)
 	if (CHECK_FAIL(map_fd < 0))
 		goto out;
 
-	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
 
 		prog = bpf_object__find_program_by_name(obj, prog_name);
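All of the tailcall test changes are the same loop-bound substitution. One side effect worth noting: bpf_map__max_entries() is now evaluated on every iteration. That is harmless in tests, and since max_entries is fixed once the map is created, a caller can always hoist it:

	unsigned int nr_slots = bpf_map__max_entries(prog_array);

	for (i = 0; i < nr_slots; i++) {
		/* ... update or run the i-th tail-call slot ... */
	}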