selftests: xsk: Add frame_headroom test
Add a test for the frame_headroom feature that can be set on the umem. The logic added validates that all offsets in all tests and packets are valid, not just the ones that have a specifically configured frame_headroom. Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Link: https://lore.kernel.org/bpf/20210922075613.12186-14-magnus.karlsson@gmail.com
This commit is contained in: commit e34087fc00 (parent e4e9baf06a)
|
@ -514,8 +514,7 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
|
|||
|
||||
pkt_stream->nb_pkts = nb_pkts;
|
||||
for (i = 0; i < nb_pkts; i++) {
|
||||
pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size +
|
||||
DEFAULT_OFFSET;
|
||||
pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size;
|
||||
pkt_stream->pkts[i].len = pkt_len;
|
||||
pkt_stream->pkts[i].payload = i;
|
||||
|
||||
|
@ -642,6 +641,25 @@ static void pkt_dump(void *pkt, u32 len)
|
|||
fprintf(stdout, "---------------------------------------\n");
|
||||
}
|
||||
|
||||
/* Check that a received descriptor's offset inside its umem frame is the one
 * the kernel should have produced: XDP_PACKET_HEADROOM plus the configured
 * frame headroom (aligned mode only), plus any offset the fill ring was
 * seeded with when the packet stream drives the fill addresses.
 * Reports a kselftest failure and returns false on mismatch.
 */
static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr,
			      u64 pkt_stream_addr)
{
	u64 base = pkt_stream->use_addr_for_fill ? pkt_stream_addr : 0;
	u32 actual = addr % umem->frame_size;
	u32 expected;

	/* In unaligned mode the configured frame headroom is not applied. */
	if (!umem->unaligned_mode)
		base += umem->frame_headroom;

	expected = (base + XDP_PACKET_HEADROOM) % umem->frame_size;

	if (actual == expected)
		return true;

	ksft_test_result_fail("ERROR: [%s] expected [%u], got [%u]\n", __func__, expected,
			      actual);
	return false;
}
|
||||
|
||||
static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
|
||||
{
|
||||
void *data = xsk_umem__get_data(buffer, addr);
|
||||
|
@ -724,6 +742,7 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
|
|||
struct pollfd *fds)
|
||||
{
|
||||
struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream);
|
||||
struct xsk_umem_info *umem = xsk->umem;
|
||||
u32 idx_rx = 0, idx_fq = 0, rcvd, i;
|
||||
u32 total = 0;
|
||||
int ret;
|
||||
|
@ -731,7 +750,7 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
|
|||
while (pkt) {
|
||||
rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
|
||||
if (!rcvd) {
|
||||
if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
|
||||
if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
|
||||
ret = poll(fds, 1, POLL_TMOUT);
|
||||
if (ret < 0)
|
||||
exit_with_error(-ret);
|
||||
|
@ -739,16 +758,16 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
|
|||
continue;
|
||||
}
|
||||
|
||||
ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
|
||||
ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
|
||||
while (ret != rcvd) {
|
||||
if (ret < 0)
|
||||
exit_with_error(-ret);
|
||||
if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
|
||||
if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
|
||||
ret = poll(fds, 1, POLL_TMOUT);
|
||||
if (ret < 0)
|
||||
exit_with_error(-ret);
|
||||
}
|
||||
ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
|
||||
ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
|
||||
}
|
||||
|
||||
for (i = 0; i < rcvd; i++) {
|
||||
|
@ -765,14 +784,17 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
|
|||
|
||||
orig = xsk_umem__extract_addr(addr);
|
||||
addr = xsk_umem__add_offset_to_addr(addr);
|
||||
if (!is_pkt_valid(pkt, xsk->umem->buffer, addr, desc->len))
|
||||
|
||||
if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len))
|
||||
return;
|
||||
if (!is_offset_correct(umem, pkt_stream, addr, pkt->addr))
|
||||
return;
|
||||
|
||||
*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;
|
||||
*xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
|
||||
pkt = pkt_stream_get_next_rx_pkt(pkt_stream);
|
||||
}
|
||||
|
||||
xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
|
||||
xsk_ring_prod__submit(&umem->fq, rcvd);
|
||||
xsk_ring_cons__release(&xsk->rx, rcvd);
|
||||
|
||||
pthread_mutex_lock(&pacing_mutex);
|
||||
|
@ -1011,7 +1033,7 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream
|
|||
break;
|
||||
addr = pkt->addr;
|
||||
} else {
|
||||
addr = i * umem->frame_size + DEFAULT_OFFSET;
|
||||
addr = i * umem->frame_size;
|
||||
}
|
||||
|
||||
*xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr;
|
||||
|
@ -1134,6 +1156,13 @@ static void testapp_bpf_res(struct test_spec *test)
|
|||
testapp_validate_traffic(test);
|
||||
}
|
||||
|
||||
static void testapp_headroom(struct test_spec *test)
|
||||
{
|
||||
test_spec_set_name(test, "UMEM_HEADROOM");
|
||||
test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
|
||||
testapp_validate_traffic(test);
|
||||
}
|
||||
|
||||
static void testapp_stats(struct test_spec *test)
|
||||
{
|
||||
int i;
|
||||
|
@ -1346,6 +1375,9 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
|
|||
if (!testapp_unaligned(test))
|
||||
return;
|
||||
break;
|
||||
case TEST_TYPE_HEADROOM:
|
||||
testapp_headroom(test);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -41,7 +41,7 @@
|
|||
#define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
|
||||
#define UMEM_SIZE (DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE)
|
||||
#define RX_FULL_RXQSIZE 32
|
||||
#define DEFAULT_OFFSET 256
|
||||
#define UMEM_HEADROOM_TEST_SIZE 128
|
||||
#define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1)
|
||||
|
||||
#define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0)
|
||||
|
@ -61,6 +61,7 @@ enum test_type {
|
|||
TEST_TYPE_ALIGNED_INV_DESC,
|
||||
TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME,
|
||||
TEST_TYPE_UNALIGNED_INV_DESC,
|
||||
TEST_TYPE_HEADROOM,
|
||||
TEST_TYPE_TEARDOWN,
|
||||
TEST_TYPE_BIDI,
|
||||
TEST_TYPE_STATS,
|
||||
|
|
Loading…
Reference in New Issue