mlx4: xdp: Reserve headroom for receiving packet when XDP prog is active
Reserve XDP_PACKET_HEADROOM for packet and enable bpf_xdp_adjust_head() support. This patch only affects the code path when XDP is active. After testing, the tx_dropped counter is incremented if the xdp_prog sends a packet larger than the wire MTU. Signed-off-by: Martin KaFai Lau <kafai@fb.com> Acked-by: Saeed Mahameed <saeedm@mellanox.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
b45f0674b9
commit
ea3349a035
|
@@ -51,7 +51,8 @@
|
|||
#include "mlx4_en.h"
|
||||
#include "en_port.h"
|
||||
|
||||
#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN)))
|
||||
#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
|
||||
XDP_PACKET_HEADROOM))
|
||||
|
||||
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
|
||||
{
|
||||
|
@@ -2700,11 +2701,6 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
|
|||
int err;
|
||||
int i;
|
||||
|
||||
if (prog && prog->xdp_adjust_head) {
|
||||
en_err(priv, "Does not support bpf_xdp_adjust_head()\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
xdp_ring_num = prog ? priv->rx_ring_num : 0;
|
||||
|
||||
/* No need to reconfigure buffers when simply swapping the
|
||||
|
|
|
@@ -96,7 +96,6 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
|
|||
struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
|
||||
const struct mlx4_en_frag_info *frag_info;
|
||||
struct page *page;
|
||||
dma_addr_t dma;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < priv->num_frags; i++) {
|
||||
|
@@ -115,9 +114,10 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
|
|||
|
||||
for (i = 0; i < priv->num_frags; i++) {
|
||||
frags[i] = ring_alloc[i];
|
||||
dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
|
||||
frags[i].page_offset += priv->frag_info[i].rx_headroom;
|
||||
rx_desc->data[i].addr = cpu_to_be64(frags[i].dma +
|
||||
frags[i].page_offset);
|
||||
ring_alloc[i] = page_alloc[i];
|
||||
rx_desc->data[i].addr = cpu_to_be64(dma);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@@ -250,7 +250,8 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
|
|||
|
||||
if (ring->page_cache.index > 0) {
|
||||
frags[0] = ring->page_cache.buf[--ring->page_cache.index];
|
||||
rx_desc->data[0].addr = cpu_to_be64(frags[0].dma);
|
||||
rx_desc->data[0].addr = cpu_to_be64(frags[0].dma +
|
||||
frags[0].page_offset);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -889,6 +890,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
|
|||
if (xdp_prog) {
|
||||
struct xdp_buff xdp;
|
||||
dma_addr_t dma;
|
||||
void *orig_data;
|
||||
u32 act;
|
||||
|
||||
dma = be64_to_cpu(rx_desc->data[0].addr);
|
||||
|
@@ -896,11 +898,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
|
|||
priv->frag_info[0].frag_size,
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
xdp.data = page_address(frags[0].page) +
|
||||
frags[0].page_offset;
|
||||
xdp.data_hard_start = page_address(frags[0].page);
|
||||
xdp.data = xdp.data_hard_start + frags[0].page_offset;
|
||||
xdp.data_end = xdp.data + length;
|
||||
orig_data = xdp.data;
|
||||
|
||||
act = bpf_prog_run_xdp(xdp_prog, &xdp);
|
||||
|
||||
if (xdp.data != orig_data) {
|
||||
length = xdp.data_end - xdp.data;
|
||||
frags[0].page_offset = xdp.data -
|
||||
xdp.data_hard_start;
|
||||
}
|
||||
|
||||
switch (act) {
|
||||
case XDP_PASS:
|
||||
break;
|
||||
|
@@ -1180,6 +1190,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
|
|||
*/
|
||||
priv->frag_info[0].frag_stride = PAGE_SIZE;
|
||||
priv->frag_info[0].dma_dir = PCI_DMA_BIDIRECTIONAL;
|
||||
priv->frag_info[0].rx_headroom = XDP_PACKET_HEADROOM;
|
||||
i = 1;
|
||||
} else {
|
||||
int buf_size = 0;
|
||||
|
@@ -1194,6 +1205,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
|
|||
ALIGN(priv->frag_info[i].frag_size,
|
||||
SMP_CACHE_BYTES);
|
||||
priv->frag_info[i].dma_dir = PCI_DMA_FROMDEVICE;
|
||||
priv->frag_info[i].rx_headroom = 0;
|
||||
buf_size += priv->frag_info[i].frag_size;
|
||||
i++;
|
||||
}
|
||||
|
|
|
@@ -354,7 +354,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
|
|||
struct mlx4_en_rx_alloc frame = {
|
||||
.page = tx_info->page,
|
||||
.dma = tx_info->map0_dma,
|
||||
.page_offset = 0,
|
||||
.page_offset = XDP_PACKET_HEADROOM,
|
||||
.page_size = PAGE_SIZE,
|
||||
};
|
||||
|
||||
|
@@ -1132,7 +1132,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
|
|||
tx_info->page = frame->page;
|
||||
frame->page = NULL;
|
||||
tx_info->map0_dma = dma;
|
||||
tx_info->map0_byte_count = length;
|
||||
tx_info->map0_byte_count = PAGE_SIZE;
|
||||
tx_info->nr_txbb = nr_txbb;
|
||||
tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN);
|
||||
tx_info->data_offset = (void *)data - (void *)tx_desc;
|
||||
|
@@ -1141,9 +1141,10 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
|
|||
tx_info->linear = 1;
|
||||
tx_info->inl = 0;
|
||||
|
||||
dma_sync_single_for_device(priv->ddev, dma, length, PCI_DMA_TODEVICE);
|
||||
dma_sync_single_range_for_device(priv->ddev, dma, frame->page_offset,
|
||||
length, PCI_DMA_TODEVICE);
|
||||
|
||||
data->addr = cpu_to_be64(dma);
|
||||
data->addr = cpu_to_be64(dma + frame->page_offset);
|
||||
data->lkey = ring->mr_key;
|
||||
dma_wmb();
|
||||
data->byte_count = cpu_to_be32(length);
|
||||
|
|
|
@@ -475,7 +475,8 @@ struct mlx4_en_frag_info {
|
|||
u16 frag_prefix_size;
|
||||
u32 frag_stride;
|
||||
enum dma_data_direction dma_dir;
|
||||
int order;
|
||||
u16 order;
|
||||
u16 rx_headroom;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_MLX4_EN_DCB
|
||||
|
|
Loading…
Reference in New Issue