IB/mlx5: Break up wqe handling into begin & finish routines
As a preliminary step for the signature feature, which will require posting multiple (3) WQEs for a single WR, break the post_send routine's WQE indexing into begin and finish routines. This patch does not change any functionality. Signed-off-by: Sagi Grimberg <sagig@mellanox.com> Signed-off-by: Roland Dreier <roland@purestorage.com>
This commit is contained in:
parent
e1e66cc264
commit
6e5eadace1
|
@ -2047,6 +2047,59 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
|
||||||
|
struct mlx5_wqe_ctrl_seg **ctrl,
|
||||||
|
struct ib_send_wr *wr, int *idx,
|
||||||
|
int *size, int nreq)
|
||||||
|
{
|
||||||
|
int err = 0;
|
||||||
|
|
||||||
|
if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
|
||||||
|
err = -ENOMEM;
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
|
||||||
|
*seg = mlx5_get_send_wqe(qp, *idx);
|
||||||
|
*ctrl = *seg;
|
||||||
|
*(uint32_t *)(*seg + 8) = 0;
|
||||||
|
(*ctrl)->imm = send_ieth(wr);
|
||||||
|
(*ctrl)->fm_ce_se = qp->sq_signal_bits |
|
||||||
|
(wr->send_flags & IB_SEND_SIGNALED ?
|
||||||
|
MLX5_WQE_CTRL_CQ_UPDATE : 0) |
|
||||||
|
(wr->send_flags & IB_SEND_SOLICITED ?
|
||||||
|
MLX5_WQE_CTRL_SOLICITED : 0);
|
||||||
|
|
||||||
|
*seg += sizeof(**ctrl);
|
||||||
|
*size = sizeof(**ctrl) / 16;
|
||||||
|
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void finish_wqe(struct mlx5_ib_qp *qp,
|
||||||
|
struct mlx5_wqe_ctrl_seg *ctrl,
|
||||||
|
u8 size, unsigned idx, u64 wr_id,
|
||||||
|
int nreq, u8 fence, u8 next_fence,
|
||||||
|
u32 mlx5_opcode)
|
||||||
|
{
|
||||||
|
u8 opmod = 0;
|
||||||
|
|
||||||
|
ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
|
||||||
|
mlx5_opcode | ((u32)opmod << 24));
|
||||||
|
ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
|
||||||
|
ctrl->fm_ce_se |= fence;
|
||||||
|
qp->fm_cache = next_fence;
|
||||||
|
if (unlikely(qp->wq_sig))
|
||||||
|
ctrl->signature = wq_sig(ctrl);
|
||||||
|
|
||||||
|
qp->sq.wrid[idx] = wr_id;
|
||||||
|
qp->sq.w_list[idx].opcode = mlx5_opcode;
|
||||||
|
qp->sq.wqe_head[idx] = qp->sq.head + nreq;
|
||||||
|
qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
|
||||||
|
qp->sq.w_list[idx].next = qp->sq.cur_post;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||||
struct ib_send_wr **bad_wr)
|
struct ib_send_wr **bad_wr)
|
||||||
{
|
{
|
||||||
|
@ -2060,7 +2113,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||||
int uninitialized_var(size);
|
int uninitialized_var(size);
|
||||||
void *qend = qp->sq.qend;
|
void *qend = qp->sq.qend;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
u32 mlx5_opcode;
|
|
||||||
unsigned idx;
|
unsigned idx;
|
||||||
int err = 0;
|
int err = 0;
|
||||||
int inl = 0;
|
int inl = 0;
|
||||||
|
@ -2069,7 +2121,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||||
int nreq;
|
int nreq;
|
||||||
int i;
|
int i;
|
||||||
u8 next_fence = 0;
|
u8 next_fence = 0;
|
||||||
u8 opmod = 0;
|
|
||||||
u8 fence;
|
u8 fence;
|
||||||
|
|
||||||
spin_lock_irqsave(&qp->sq.lock, flags);
|
spin_lock_irqsave(&qp->sq.lock, flags);
|
||||||
|
@ -2082,13 +2133,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
|
|
||||||
mlx5_ib_warn(dev, "\n");
|
|
||||||
err = -ENOMEM;
|
|
||||||
*bad_wr = wr;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
fence = qp->fm_cache;
|
fence = qp->fm_cache;
|
||||||
num_sge = wr->num_sge;
|
num_sge = wr->num_sge;
|
||||||
if (unlikely(num_sge > qp->sq.max_gs)) {
|
if (unlikely(num_sge > qp->sq.max_gs)) {
|
||||||
|
@ -2098,19 +2142,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
|
err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
|
||||||
seg = mlx5_get_send_wqe(qp, idx);
|
if (err) {
|
||||||
ctrl = seg;
|
mlx5_ib_warn(dev, "\n");
|
||||||
*(uint32_t *)(seg + 8) = 0;
|
err = -ENOMEM;
|
||||||
ctrl->imm = send_ieth(wr);
|
*bad_wr = wr;
|
||||||
ctrl->fm_ce_se = qp->sq_signal_bits |
|
goto out;
|
||||||
(wr->send_flags & IB_SEND_SIGNALED ?
|
}
|
||||||
MLX5_WQE_CTRL_CQ_UPDATE : 0) |
|
|
||||||
(wr->send_flags & IB_SEND_SOLICITED ?
|
|
||||||
MLX5_WQE_CTRL_SOLICITED : 0);
|
|
||||||
|
|
||||||
seg += sizeof(*ctrl);
|
|
||||||
size = sizeof(*ctrl) / 16;
|
|
||||||
|
|
||||||
switch (ibqp->qp_type) {
|
switch (ibqp->qp_type) {
|
||||||
case IB_QPT_XRC_INI:
|
case IB_QPT_XRC_INI:
|
||||||
|
@ -2244,22 +2282,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
mlx5_opcode = mlx5_ib_opcode[wr->opcode];
|
finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
|
||||||
ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
|
get_fence(fence, wr), next_fence,
|
||||||
mlx5_opcode |
|
mlx5_ib_opcode[wr->opcode]);
|
||||||
((u32)opmod << 24));
|
|
||||||
ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
|
|
||||||
ctrl->fm_ce_se |= get_fence(fence, wr);
|
|
||||||
qp->fm_cache = next_fence;
|
|
||||||
if (unlikely(qp->wq_sig))
|
|
||||||
ctrl->signature = wq_sig(ctrl);
|
|
||||||
|
|
||||||
qp->sq.wrid[idx] = wr->wr_id;
|
|
||||||
qp->sq.w_list[idx].opcode = mlx5_opcode;
|
|
||||||
qp->sq.wqe_head[idx] = qp->sq.head + nreq;
|
|
||||||
qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
|
|
||||||
qp->sq.w_list[idx].next = qp->sq.cur_post;
|
|
||||||
|
|
||||||
if (0)
|
if (0)
|
||||||
dump_wqe(qp, idx, size);
|
dump_wqe(qp, idx, size);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue