net/mlx4_en: use napi_complete_done() in TX completion

In order to benefit from the new napi_defer_hard_irqs feature,
we need to use napi_complete_done() variant in this driver.

RX path is already using it, this patch implements TX completion side.

mlx4_en_process_tx_cq() now returns the number of retired packets,
instead of a boolean, so that mlx4_en_poll_tx_cq() can pass
this value to napi_complete_done().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Eric Dumazet 2020-04-22 09:13:29 -07:00 committed by David S. Miller
parent 7e417a66b8
commit cf4058dbaa
3 changed files with 13 additions and 13 deletions

View File

@@ -946,7 +946,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring]; xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
if (xdp_tx_cq->xdp_busy) { if (xdp_tx_cq->xdp_busy) {
clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq, clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
budget); budget) < budget;
xdp_tx_cq->xdp_busy = !clean_complete; xdp_tx_cq->xdp_busy = !clean_complete;
} }
} }

View File

@@ -382,7 +382,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt; return cnt;
} }
bool mlx4_en_process_tx_cq(struct net_device *dev, int mlx4_en_process_tx_cq(struct net_device *dev,
struct mlx4_en_cq *cq, int napi_budget) struct mlx4_en_cq *cq, int napi_budget)
{ {
struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -405,7 +405,7 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
u32 ring_cons; u32 ring_cons;
if (unlikely(!priv->port_up)) if (unlikely(!priv->port_up))
return true; return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue); netdev_txq_bql_complete_prefetchw(ring->tx_queue);
@@ -480,7 +480,7 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped); WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);
if (cq->type == TX_XDP) if (cq->type == TX_XDP)
return done < budget; return done;
netdev_tx_completed_queue(ring->tx_queue, packets, bytes); netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
@@ -492,7 +492,7 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
ring->wake_queue++; ring->wake_queue++;
} }
return done < budget; return done;
} }
void mlx4_en_tx_irq(struct mlx4_cq *mcq) void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -512,13 +512,13 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev; struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_priv *priv = netdev_priv(dev);
bool clean_complete; int work_done;
clean_complete = mlx4_en_process_tx_cq(dev, cq, budget); work_done = mlx4_en_process_tx_cq(dev, cq, budget);
if (!clean_complete) if (work_done >= budget)
return budget; return budget;
napi_complete(napi); if (napi_complete_done(napi, work_done))
mlx4_en_arm_cq(priv, cq); mlx4_en_arm_cq(priv, cq);
return 0; return 0;

View File

@ -737,7 +737,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
int budget); int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget); int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
bool mlx4_en_process_tx_cq(struct net_device *dev, int mlx4_en_process_tx_cq(struct net_device *dev,
struct mlx4_en_cq *cq, int napi_budget); struct mlx4_en_cq *cq, int napi_budget);
u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring, struct mlx4_en_tx_ring *ring,