selftests/bpf: Extend SK_REUSEPORT tests to cover SOCKMAP/SOCKHASH

Parametrize the SK_REUSEPORT tests so that the map type for storing sockets
is not hard-coded in the test setup routine.

This, together with careful state cleaning after the tests, lets us run the
test cases for REUSEPORT_ARRAY, SOCKMAP, and SOCKHASH to have test coverage
for all supported map types. The last two support only TCP sockets at the
moment.

Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20200218171023.844439-11-jakub@cloudflare.com
commit 11318ba8ca (parent 035ff358f2)
Author: Jakub Sitnicki
Date:   2020-02-18 17:10:22 +0000 (committed by Daniel Borkmann)

 1 file changed, 53 insertions(+), 10 deletions(-)

@@ -36,6 +36,7 @@ static int result_map, tmp_index_ovr_map, linum_map, data_check_map;
 static __u32 expected_results[NR_RESULTS];
 static int sk_fds[REUSEPORT_ARRAY_SIZE];
 static int reuseport_array = -1, outer_map = -1;
+static enum bpf_map_type inner_map_type;
 static int select_by_skb_data_prog;
 static int saved_tcp_syncookie = -1;
 static struct bpf_object *obj;
@ -63,13 +64,15 @@ static union sa46 {
} \
})
static int create_maps(void)
static int create_maps(enum bpf_map_type inner_type)
{
struct bpf_create_map_attr attr = {};
inner_map_type = inner_type;
/* Creating reuseport_array */
attr.name = "reuseport_array";
attr.map_type = BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
attr.map_type = inner_type;
attr.key_size = sizeof(__u32);
attr.value_size = sizeof(__u32);
attr.max_entries = REUSEPORT_ARRAY_SIZE;
@@ -726,12 +729,36 @@ static void cleanup_per_test(bool no_inner_map)
 
 static void cleanup(void)
 {
-       if (outer_map != -1)
+       if (outer_map != -1) {
                close(outer_map);
-       if (reuseport_array != -1)
+               outer_map = -1;
+       }
+
+       if (reuseport_array != -1) {
                close(reuseport_array);
-       if (obj)
+               reuseport_array = -1;
+       }
+
+       if (obj) {
                bpf_object__close(obj);
+               obj = NULL;
+       }
+
+       memset(expected_results, 0, sizeof(expected_results));
+}
+
+static const char *maptype_str(enum bpf_map_type type)
+{
+       switch (type) {
+       case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+               return "reuseport_sockarray";
+       case BPF_MAP_TYPE_SOCKMAP:
+               return "sockmap";
+       case BPF_MAP_TYPE_SOCKHASH:
+               return "sockhash";
+       default:
+               return "unknown";
+       }
 }
 
 static const char *family_str(sa_family_t family)
@@ -779,13 +806,21 @@ static void test_config(int sotype, sa_family_t family, bool inany)
        const struct test *t;
 
        for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
-               snprintf(s, sizeof(s), "%s/%s %s %s",
+               snprintf(s, sizeof(s), "%s %s/%s %s %s",
+                        maptype_str(inner_map_type),
                         family_str(family), sotype_str(sotype),
                         inany ? "INANY" : "LOOPBACK", t->name);
 
                if (!test__start_subtest(s))
                        continue;
 
+               if (sotype == SOCK_DGRAM &&
+                   inner_map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
+                       /* SOCKMAP/SOCKHASH don't support UDP yet */
+                       test__skip();
+                       continue;
+               }
+
                setup_per_test(sotype, family, inany, t->no_inner_map);
                t->fn(sotype, family);
                cleanup_per_test(t->no_inner_map);
@@ -814,13 +849,20 @@ static void test_all(void)
                test_config(c->sotype, c->family, c->inany);
 }
 
-void test_select_reuseport(void)
+void test_map_type(enum bpf_map_type mt)
 {
-       if (create_maps())
+       if (create_maps(mt))
                goto out;
        if (prepare_bpf_obj())
                goto out;
 
+       test_all();
+out:
+       cleanup();
+}
+
+void test_select_reuseport(void)
+{
        saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL);
        saved_tcp_syncookie = read_int_sysctl(TCP_SYNCOOKIE_SYSCTL);
        if (saved_tcp_syncookie < 0 || saved_tcp_syncookie < 0)
@@ -831,8 +873,9 @@ void test_select_reuseport(void)
        if (disable_syncookie())
                goto out;
 
-       test_all();
+       test_map_type(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
+       test_map_type(BPF_MAP_TYPE_SOCKMAP);
+       test_map_type(BPF_MAP_TYPE_SOCKHASH);
 out:
-       cleanup();
        restore_sysctls();
 }
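
For illustration only, a minimal standalone sketch of the pattern the patch introduces: run the whole suite once per socket map type, with a full cleanup in between so no state leaks from one map type to the next. The helpers setup_for(), run_all(), and teardown() are placeholders standing in for the selftest's create_maps()/prepare_bpf_obj(), test_all(), and cleanup(); the sketch assumes UAPI headers new enough to define the SOCKMAP/SOCKHASH map types.

/* Sketch only: placeholder helpers, not the selftest's real functions. */
#include <stddef.h>
#include <stdio.h>
#include <linux/bpf.h>

static int setup_for(enum bpf_map_type t)
{
        /* create the socket map of the given type, load BPF programs */
        printf("setting up map type %d\n", (int)t);
        return 0;
}

static void run_all(void)
{
        /* run every test case against the current map type */
}

static void teardown(void)
{
        /* close fds and reset globals so the next map type starts clean */
}

int main(void)
{
        static const enum bpf_map_type types[] = {
                BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
                BPF_MAP_TYPE_SOCKMAP,
                BPF_MAP_TYPE_SOCKHASH,
        };
        size_t i;

        for (i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
                if (setup_for(types[i]) == 0)
                        run_all();
                teardown();     /* full cleanup even if setup failed */
        }
        return 0;
}

In the patched selftest this loop is test_map_type(), called once per map type from test_select_reuseport(), and each subtest name now carries the maptype_str() prefix.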