RDMA/hns: Fix 0-length sge calculation error
One RC SQ WQE can store 2 sges but a UD WQE can't, so ignore the 2 valid
sges of wr.sg_list for RC which have already been filled into the WQE
before setting the extended sge. Neither RC nor UD can contain 0-length
sges, so these 0-length sges should be skipped.
Fixes: 54d6638765 ("RDMA/hns: Optimize WQE buffer size calculating process")
Link: https://lore.kernel.org/r/1606558959-48510-2-git-send-email-liweihang@huawei.com
Signed-off-by: Lang Cheng <chenglang@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent
1d11d26cf0
commit
0fd0175e30
|
@ -214,26 +214,21 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
|
static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
|
||||||
unsigned int *sge_ind, unsigned int valid_num_sge)
|
unsigned int *sge_ind, unsigned int cnt)
|
||||||
{
|
{
|
||||||
struct hns_roce_v2_wqe_data_seg *dseg;
|
struct hns_roce_v2_wqe_data_seg *dseg;
|
||||||
unsigned int cnt = valid_num_sge;
|
|
||||||
struct ib_sge *sge = wr->sg_list;
|
|
||||||
unsigned int idx = *sge_ind;
|
unsigned int idx = *sge_ind;
|
||||||
|
|
||||||
if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
|
|
||||||
cnt -= HNS_ROCE_SGE_IN_WQE;
|
|
||||||
sge += HNS_ROCE_SGE_IN_WQE;
|
|
||||||
}
|
|
||||||
|
|
||||||
while (cnt > 0) {
|
while (cnt > 0) {
|
||||||
dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
|
dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
|
||||||
|
if (likely(sge->length)) {
|
||||||
set_data_seg_v2(dseg, sge);
|
set_data_seg_v2(dseg, sge);
|
||||||
idx++;
|
idx++;
|
||||||
sge++;
|
|
||||||
cnt--;
|
cnt--;
|
||||||
}
|
}
|
||||||
|
sge++;
|
||||||
|
}
|
||||||
|
|
||||||
*sge_ind = idx;
|
*sge_ind = idx;
|
||||||
}
|
}
|
||||||
|
@ -340,7 +335,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
set_extend_sge(qp, wr, sge_ind, valid_num_sge);
|
set_extend_sge(qp, wr->sg_list + i, sge_ind,
|
||||||
|
valid_num_sge - HNS_ROCE_SGE_IN_WQE);
|
||||||
}
|
}
|
||||||
|
|
||||||
roce_set_field(rc_sq_wqe->byte_16,
|
roce_set_field(rc_sq_wqe->byte_16,
|
||||||
|
@ -503,7 +499,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
set_extend_sge(qp, wr, &curr_idx, valid_num_sge);
|
set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The pipeline can sequentially post all valid WQEs into WQ buffer,
|
* The pipeline can sequentially post all valid WQEs into WQ buffer,
|
||||||
|
|
Loading…
Reference in New Issue