i40e: remove the redundant buffer info updates
For performance reasons, remove the redundant buffer info updates (*bi = NULL). The buffers ready to be cleaned can easily be tracked based on the ring next-to-clean variable, which is consistently updated.

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Tested-by: Kiran Bhandare <kiranx.bhandare@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
commit d4178c31a5 (parent f12738b6ec)
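The reasoning above, as a minimal self-contained sketch (demo_ring, demo_clean and the other names below are made up for illustration; this is not the i40e code): when the ring size is a power of two and next_to_clean/next_to_use are kept up to date, the slots that still own a buffer are exactly those in [next_to_clean, next_to_use), so nothing needs to write NULL into a slot after its buffer is freed, and no per-slot NULL check is needed.

#include <stdio.h>
#include <stdlib.h>

/* Toy ring; illustrative names only, not the i40e structures. */
struct demo_ring {
	void **bufs;			/* per-slot buffer pointers  */
	unsigned int count;		/* ring size, a power of two */
	unsigned int next_to_use;	/* producer index            */
	unsigned int next_to_clean;	/* consumer index            */
};

/*
 * Free every buffer the ring still owns: exactly the slots in
 * [next_to_clean, next_to_use).  Because the two indices fully
 * describe that window, the slots are not reset to NULL and no
 * NULL check is required.
 */
static void demo_clean(struct demo_ring *r)
{
	unsigned int count_mask = r->count - 1;
	unsigned int ntc = r->next_to_clean;
	unsigned int ntu = r->next_to_use;

	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask)
		free(r->bufs[ntc]);	/* no "r->bufs[ntc] = NULL" */

	r->next_to_clean = ntc;
}

int main(void)
{
	struct demo_ring r = { .count = 8 };
	int i;

	r.bufs = calloc(r.count, sizeof(*r.bufs));

	/* Producer fills three slots and advances next_to_use. */
	for (i = 0; i < 3; i++) {
		r.bufs[r.next_to_use] = malloc(64);
		r.next_to_use = (r.next_to_use + 1) & (r.count - 1);
	}

	demo_clean(&r);
	printf("ntc=%u ntu=%u\n", r.next_to_clean, r.next_to_use);
	free(r.bufs);
	return 0;
}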
@@ -280,7 +280,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 	while (likely(total_rx_packets < (unsigned int)budget)) {
 		union i40e_rx_desc *rx_desc;
-		struct xdp_buff **bi;
+		struct xdp_buff *bi;
 		unsigned int size;
 		u64 qword;
 
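A side note on the declaration change in this hunk, sketched with throwaway types (struct buf, consume_old and consume_new are illustrative, not driver code): the double pointer was only needed so that the slot returned by i40e_rx_bi() could be written back to NULL after the buffer was consumed; with that write-back gone, the slot is dereferenced once and a plain pointer is carried instead.

#include <stdio.h>

struct buf { int data; };

/*
 * Old shape: the caller keeps the address of the ring slot because it
 * has to clear the slot once the buffer has been consumed.
 */
static void consume_old(struct buf **slot)
{
	struct buf *b = *slot;

	printf("old: %d\n", b->data);
	*slot = NULL;		/* the per-packet store this patch removes */
}

/*
 * New shape: the ring indices already say which slots are live, so only
 * the buffer pointer itself is passed around and the store disappears.
 */
static void consume_new(struct buf *b)
{
	printf("new: %d\n", b->data);
}

int main(void)
{
	struct buf a = { 1 }, b = { 2 };
	struct buf *slot = &a;

	consume_old(&slot);
	consume_new(&b);
	return 0;
}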
@@ -297,9 +297,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 			i40e_clean_programming_status(rx_ring,
 						      rx_desc->raw.qword[0],
 						      qword);
-			bi = i40e_rx_bi(rx_ring, next_to_clean);
-			xsk_buff_free(*bi);
-			*bi = NULL;
+			bi = *i40e_rx_bi(rx_ring, next_to_clean);
+			xsk_buff_free(bi);
 			next_to_clean = (next_to_clean + 1) & count_mask;
 			continue;
 		}
@@ -309,18 +308,17 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 		if (!size)
 			break;
 
-		bi = i40e_rx_bi(rx_ring, next_to_clean);
-		(*bi)->data_end = (*bi)->data + size;
-		xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);
+		bi = *i40e_rx_bi(rx_ring, next_to_clean);
+		bi->data_end = bi->data + size;
+		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
 
-		xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
+		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
 		if (xdp_res) {
 			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
 				xdp_xmit |= xdp_res;
 			else
-				xsk_buff_free(*bi);
+				xsk_buff_free(bi);
 
-			*bi = NULL;
 			total_rx_bytes += size;
 			total_rx_packets++;
 
@@ -335,13 +333,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
 		 * SBP is *not* set in PRT_SBPVSI (default not set).
 		 */
-		skb = i40e_construct_skb_zc(rx_ring, *bi);
+		skb = i40e_construct_skb_zc(rx_ring, bi);
 		if (!skb) {
 			rx_ring->rx_stats.alloc_buff_failed++;
 			break;
 		}
 
-		*bi = NULL;
 		next_to_clean = (next_to_clean + 1) & count_mask;
 
 		if (eth_skb_pad(skb))
@@ -594,16 +591,14 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 
 void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
 {
-	u16 i;
+	u16 count_mask = rx_ring->count - 1;
+	u16 ntc = rx_ring->next_to_clean;
+	u16 ntu = rx_ring->next_to_use;
 
-	for (i = 0; i < rx_ring->count; i++) {
-		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);
+	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
 
-		if (!rx_bi)
-			continue;
-
 		xsk_buff_free(rx_bi);
-		rx_bi = NULL;
 	}
 }
 
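One detail the loops above rely on: the (x + 1) & count_mask update wraps the ring index without a modulo, which is only equivalent to (x + 1) % count when the ring size is a power of two. A small standalone check of that equivalence (illustrative only, not driver code):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned int count = 512;		/* power-of-two ring size */
	const unsigned int count_mask = count - 1;
	unsigned int i;

	/*
	 * For a power-of-two count, masking with count - 1 wraps exactly
	 * like reducing modulo count, including at the end of the ring.
	 */
	for (i = 0; i < 2 * count; i++)
		assert(((i + 1) & count_mask) == ((i + 1) % count));

	printf("masked increment wraps like modulo %u\n", count);
	return 0;
}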