net/mlx5e: Remove wrong poll CQ optimization

With the MLX5E_CQ_HAS_CQES optimization flag, the following buggy
flow might occur:
- Suppose RX is always busy while TX gets a single packet every second.
- We poll the single TX cqe and clear the TX CQ's MLX5E_CQ_HAS_CQES flag.
- We never arm the TX CQ again, since RX keeps NAPI busy.
- The flag therefore stays clear, and new TX cqes are never polled.

We revert this optimization.

Fixes: e586b3b0ba ('net/mlx5: Ethernet Datapath files')
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
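
To make the failure concrete before the diff: the standalone C program below simulates the four-step flow above. It is a sketch, not the driver code; only the "set a flag on the completion event, test_and_clear it on poll" discipline mirrors the removed optimization, and every identifier in it is a hypothetical stand-in. The second poll returns nothing even though two completions are pending, because the unarmed CQ raises no event that would set the flag again.

/*
 * Illustration only, NOT the mlx5e source.  Models the removed
 * MLX5E_CQ_HAS_CQES scheme with plain bools instead of atomic bitops.
 */
#include <stdbool.h>
#include <stdio.h>

static bool tx_cq_armed = true; /* hardware raises an event only while armed */
static bool tx_has_cqes;        /* stand-in for the MLX5E_CQ_HAS_CQES bit    */
static int  tx_cqes_in_cq;      /* completions actually sitting in the CQ    */

/* Completion event: fires once per arming, then the CQ stays silent. */
static void tx_completion_event(void)
{
	if (!tx_cq_armed)
		return;         /* unarmed CQ: no event, flag never set */
	tx_cq_armed = false;
	tx_has_cqes = true;     /* the set_bit() this patch removes */
}

/* TX poll with the removed early-out, simplified. */
static int poll_tx_cq(void)
{
	int done;

	if (!tx_has_cqes)       /* test_and_clear_bit() saw the bit clear */
		return 0;       /* real cqes in the CQ are never even read */
	tx_has_cqes = false;

	done = tx_cqes_in_cq;
	tx_cqes_in_cq = 0;
	return done;
}

int main(void)
{
	/* second 0: one TX packet completes while the CQ is armed */
	tx_cqes_in_cq++;
	tx_completion_event();
	printf("poll 1: %d cqe(s)\n", poll_tx_cq());    /* prints 1 */

	/*
	 * RX stays busy, so NAPI never goes idle and the TX CQ is never
	 * re-armed.  Later completions raise no event and set no flag.
	 */
	tx_cqes_in_cq++;        /* second 1 */
	tx_completion_event();
	tx_cqes_in_cq++;        /* second 2 */
	tx_completion_event();

	int polled = poll_tx_cq();
	printf("poll 2: %d cqe(s), %d stuck in the CQ\n",
	       polled, tx_cqes_in_cq);                  /* 0 polled, 2 stuck */
	return 0;
}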

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -304,14 +304,9 @@ enum {
 	MLX5E_RQ_STATE_POST_WQES_ENABLE,
 };
 
-enum cq_flags {
-	MLX5E_CQ_HAS_CQES = 1,
-};
-
 struct mlx5e_cq {
 	/* data path - accessed per cqe */
 	struct mlx5_cqwq           wq;
-	unsigned long              flags;
 
 	/* data path - accessed per napi poll */
 	struct napi_struct        *napi;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

@@ -230,10 +230,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
 	int work_done;
 
-	/* avoid accessing cq (dma coherent memory) if not needed */
-	if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
-		return 0;
-
 	for (work_done = 0; work_done < budget; work_done++) {
 		struct mlx5e_rx_wqe *wqe;
 		struct mlx5_cqe64 *cqe;
@@ -279,8 +275,5 @@ wq_ll_pop:
 	/* ensure cq space is freed before enabling more cqes */
 	wmb();
 
-	if (work_done == budget)
-		set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
-
 	return work_done;
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

@@ -335,10 +335,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 	u16 sqcc;
 	int i;
 
-	/* avoid accessing cq (dma coherent memory) if not needed */
-	if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
-		return false;
-
 	sq = container_of(cq, struct mlx5e_sq, cq);
 
 	npkts = 0;
@@ -422,10 +418,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 			netif_tx_wake_queue(sq->txq);
 			sq->stats.wake++;
 	}
 
-	if (i == MLX5E_TX_CQ_POLL_BUDGET) {
-		set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
-		return true;
-	}
-	return false;
+	return (i == MLX5E_TX_CQ_POLL_BUDGET);
 }
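
Beyond the deletions, note the one behavioral tweak in this hunk: mlx5e_poll_tx_cq now reports "budget exhausted" directly. That bool is what keeps NAPI scheduled while TX work may remain, because the channel's poller only re-arms the CQs once every queue reports idle. A paraphrased sketch, from memory and with steps omitted (the authoritative code is en_txrx.c in this tree):

/*
 * Paraphrased shape of mlx5e_napi_poll() around this commit; NOT the
 * verbatim source.  A true return from any poller keeps NAPI running,
 * and the CQs are re-armed only after everything goes idle.
 */
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
					       napi);
	bool busy = false;
	int work_done;
	int i;

	for (i = 0; i < c->num_tc; i++)
		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);

	work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
	busy |= (work_done == budget);

	if (busy)
		return budget;		/* NAPI will call us again */

	napi_complete(napi);

	/* only now do new completions raise events again */
	for (i = 0; i < c->num_tc; i++)
		mlx5e_cq_arm(&c->sq[i].cq);
	mlx5e_cq_arm(&c->rq.cq);

	return work_done;
}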

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c

@@ -88,7 +88,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
 {
 	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
 
-	set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
 	set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
 	barrier();
 	napi_schedule(cq->napi);