i40e: convert to use generic xdp_frame and xdp_return_frame API
Also convert driver i40e, which very recently got XDP_REDIRECT support
in commit d9314c474d ("i40e: add support for XDP_REDIRECT").
V7: This patch got added in V7 of this patchset.
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit b411ef1102
parent 70280ed91c
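For context, the conversion follows the generic pattern introduced by this patchset: on transmit the xdp_buff is turned into an xdp_frame via convert_to_xdp_frame(), the driver stores the xdp_frame pointer in its TX buffer info, and on TX completion the memory is handed back through xdp_return_frame() instead of page_frag_free(). Below is a minimal sketch of that pattern, not i40e code; the my_* names are illustrative, while the xdp_* calls and fields match the ones used in the diff that follows.

/* Minimal sketch of the generic xdp_frame TX/completion pattern.
 * Not i40e code: my_tx_buf, my_xmit_xdp() and my_clean_tx() are
 * illustrative; convert_to_xdp_frame()/xdp_return_frame() are the
 * generic helpers this patch adopts (API as of this patchset).
 */
#include <net/xdp.h>

struct my_tx_buf {
	struct xdp_frame *xdpf;	/* kept instead of a raw data pointer */
};

/* TX side: detach the frame state from the xdp_buff before queueing */
static int my_xmit_xdp(struct xdp_buff *xdp, struct my_tx_buf *buf)
{
	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);

	if (unlikely(!xdpf))
		return -ENOMEM;	/* e.g. not enough headroom for the frame metadata */

	/* xdpf->data and xdpf->len now describe the packet for DMA mapping */
	buf->xdpf = xdpf;
	return 0;
}

/* Completion side: return the memory via the frame's xdp_mem_info */
static void my_clean_tx(struct my_tx_buf *buf)
{
	xdp_return_frame(buf->xdpf->data, &buf->xdpf->mem);
	buf->xdpf = NULL;
}

Carrying the xdp_mem_info in the frame is what lets xdp_return_frame() free the buffer according to the memory model it was allocated from, which a plain page_frag_free() on the raw pointer cannot do; this is also why struct i40e_tx_buffer gains an xdpf member in its union (last hunk).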
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -638,7 +638,8 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
 			kfree(tx_buffer->raw_buf);
 		else if (ring_is_xdp(ring))
-			page_frag_free(tx_buffer->raw_buf);
+			xdp_return_frame(tx_buffer->xdpf->data,
+					 &tx_buffer->xdpf->mem);
 		else
 			dev_kfree_skb_any(tx_buffer->skb);
 		if (dma_unmap_len(tx_buffer, len))
@@ -841,7 +842,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 
 		/* free the skb/XDP data */
 		if (ring_is_xdp(tx_ring))
-			page_frag_free(tx_buf->raw_buf);
+			xdp_return_frame(tx_buf->xdpf->data, &tx_buf->xdpf->mem);
 		else
 			napi_consume_skb(tx_buf->skb, napi_budget);
 
@@ -2225,6 +2226,8 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 	if (!xdp_prog)
 		goto xdp_out;
 
+	prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
 	act = bpf_prog_run_xdp(xdp_prog, xdp);
 	switch (act) {
 	case XDP_PASS:
@@ -3481,25 +3484,32 @@ dma_error:
 static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
 			      struct i40e_ring *xdp_ring)
 {
-	u32 size = xdp->data_end - xdp->data;
 	u16 i = xdp_ring->next_to_use;
 	struct i40e_tx_buffer *tx_bi;
 	struct i40e_tx_desc *tx_desc;
+	struct xdp_frame *xdpf;
 	dma_addr_t dma;
+	u32 size;
+
+	xdpf = convert_to_xdp_frame(xdp);
+	if (unlikely(!xdpf))
+		return I40E_XDP_CONSUMED;
+
+	size = xdpf->len;
 
 	if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
 		xdp_ring->tx_stats.tx_busy++;
 		return I40E_XDP_CONSUMED;
 	}
 
-	dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
+	dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE);
 	if (dma_mapping_error(xdp_ring->dev, dma))
 		return I40E_XDP_CONSUMED;
 
 	tx_bi = &xdp_ring->tx_bi[i];
 	tx_bi->bytecount = size;
 	tx_bi->gso_segs = 1;
-	tx_bi->raw_buf = xdp->data;
+	tx_bi->xdpf = xdpf;
 
 	/* record length, and DMA address */
 	dma_unmap_len_set(tx_bi, len, size);
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -306,6 +306,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
 struct i40e_tx_buffer {
 	struct i40e_tx_desc *next_to_watch;
 	union {
+		struct xdp_frame *xdpf;
 		struct sk_buff *skb;
 		void *raw_buf;
 	};