net/mlx5e: TX, Generalise code and usage of error CQE dump
Error CQE was dumped only for TXQ SQs. Generalise the function, and add
usage for error completions on ICO SQs and XDP SQs.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Reviewed-by: Aya Levin <ayal@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent e658664c77
commit f1b95753ee
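To make the shape of the refactor easier to see before reading the hunks, here is a minimal standalone C sketch (not the kernel code; all types and names below are simplified stand-ins): the dump helper stops taking the TXQ-specific SQ struct and instead takes the pieces every SQ type shares, its CQ and its SQ number, so ICO and XDP completion paths can reuse it.

/*
 * Sketch only: stand-ins for struct mlx5e_cq / mlx5_err_cqe and the
 * TXQ SQ. The real helper lives in the driver and prints via netdev_err().
 */
#include <stdio.h>

struct cq      { unsigned int cqn; };      /* stand-in for struct mlx5e_cq */
struct err_cqe { unsigned int syndrome; }; /* stand-in for mlx5_err_cqe    */

/* Before the patch, the helper was bound to one SQ type like this one. */
struct txqsq { struct cq cq; unsigned int sqn; };

/* After: generic over SQ type -- callers pass the CQ and the SQ number. */
static void dump_error_cqe(struct cq *cq, unsigned int sqn,
			   struct err_cqe *err_cqe)
{
	printf("Error cqe on cqn 0x%x, sqn 0x%x, syndrome 0x%x\n",
	       cq->cqn, sqn, err_cqe->syndrome);
}

int main(void)
{
	struct txqsq sq = { .cq = { .cqn = 0x12 }, .sqn = 0x34 };
	struct err_cqe e = { .syndrome = 0x2 };

	/* Any SQ type (TXQ, ICO, XDP) can now call the helper the same way. */
	dump_error_cqe(&sq.cq, sq.sqn, &e);
	return 0;
}

The same inversion happens in the patch below: mlx5e_dump_error_cqe() moves to a shared header, takes (cq, sqn) instead of a struct mlx5e_txqsq, and gains two new call sites.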
@@ -189,6 +189,22 @@ static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
 	}
 }
 
+static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 sqn,
+					struct mlx5_err_cqe *err_cqe)
+{
+	struct mlx5_cqwq *wq = &cq->wq;
+	u32 ci;
+
+	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
+
+	netdev_err(cq->channel->netdev,
+		   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
+		   cq->mcq.cqn, ci, sqn,
+		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
+		   err_cqe->syndrome, err_cqe->vendor_err_synd);
+	mlx5_dump_err_cqe(cq->mdev, err_cqe);
+}
+
 /* SW parser related functions */
 
 struct mlx5e_swp_spec {
@@ -415,11 +415,6 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
-		if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ))
-			netdev_WARN_ONCE(sq->channel->netdev,
-					 "Bad OP in XDPSQ CQE: 0x%x\n",
-					 get_cqe_opcode(cqe));
-
 		do {
 			struct mlx5e_xdp_wqe_info *wi;
 			u16 ci;
@@ -432,6 +427,14 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 
 			mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true);
 		} while (!last_wqe);
+
+		if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+			netdev_WARN_ONCE(sq->channel->netdev,
+					 "Bad OP in XDPSQ CQE: 0x%x\n",
+					 get_cqe_opcode(cqe));
+			mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
+					     (struct mlx5_err_cqe *)cqe);
+		}
 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
 	if (xsk_frames)
@@ -631,6 +631,8 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 			netdev_WARN_ONCE(cq->channel->netdev,
 					 "Bad OP in ICOSQ CQE: 0x%x\n",
 					 get_cqe_opcode(cqe));
+			mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
+					     (struct mlx5_err_cqe *)cqe);
 			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
 				queue_work(cq->channel->priv->wq, &sq->recover_work);
 			break;
@@ -399,22 +399,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 	return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
 }
 
-static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
-				 struct mlx5_err_cqe *err_cqe)
-{
-	struct mlx5_cqwq *wq = &sq->cq.wq;
-	u32 ci;
-
-	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
-
-	netdev_err(sq->channel->netdev,
-		   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
-		   sq->cq.mcq.cqn, ci, sq->sqn,
-		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
-		   err_cqe->syndrome, err_cqe->vendor_err_synd);
-	mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
-}
-
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
 	struct mlx5e_sq_stats *stats;
@@ -501,7 +485,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
 			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
 					      &sq->state)) {
-				mlx5e_dump_error_cqe(sq,
+				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
 						     (struct mlx5_err_cqe *)cqe);
 				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
 				queue_work(cq->channel->priv->wq,