RDMA/hns: Use a separate function for setting extended sge parameters

Move the code that sets the extended sge size into a separate
function and remove the unused variables.

Link: https://lore.kernel.org/r/1562593285-8037-5-git-send-email-oulijun@huawei.com
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
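
For readers outside the driver, here is a minimal standalone sketch of the sizing the extracted helper performs. Everything below is a simplified stand-in, not driver code: sketch_qp replaces struct hns_roce_qp, round_up_pow2() replaces the kernel's roundup_pow_of_two(), and the "max_gs - 2" multiplier is illustrative of the hip08 layout where the first two sges are carried in the WQE itself.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the parts of struct hns_roce_qp used here. */
struct sketch_qp {
	uint32_t wqe_cnt;	/* SQ depth, already a power of two */
	uint32_t max_gs;	/* max scatter/gather entries per WQE */
	uint32_t sge_cnt;	/* extended sge entries to allocate */
	uint32_t sge_shift;	/* log2 of one extended sge entry size */
};

/* Local substitute for the kernel's roundup_pow_of_two(). */
static uint32_t round_up_pow2(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/*
 * Shape of the extracted helper: only WQEs carrying more than two
 * sges need the extended area, and that area is sized to a power of
 * two. The exact multiplier is illustrative, not authoritative.
 */
static int set_extend_sge_param_sketch(struct sketch_qp *qp)
{
	if (qp->max_gs > 2) {
		qp->sge_cnt = round_up_pow2(qp->wqe_cnt * (qp->max_gs - 2));
		qp->sge_shift = 4;	/* 16-byte extended sge entries */
	}
	return 0;
}

int main(void)
{
	struct sketch_qp qp = { .wqe_cnt = 128, .max_gs = 5 };

	if (set_extend_sge_param_sketch(&qp))
		return 1;
	/* 128 WQEs * 3 spilled sges = 384, rounded up to 512 */
	printf("sge_cnt=%u sge_shift=%u\n", qp.sge_cnt, qp.sge_shift);
	return 0;
}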
2 files changed, 52 insertions(+), 39 deletions(-)

drivers/infiniband/hw/hns/hns_roce_device.h

@@ -654,8 +654,6 @@ struct hns_roce_qp {
 	u32			doorbell_qpn;
 	__le32			sq_signal_bits;
 	u32			sq_next_wqe;
-	int			sq_max_wqes_per_wr;
-	int			sq_spare_wqes;
 	struct hns_roce_wq	sq;
 
 	struct ib_umem		*umem;

drivers/infiniband/hw/hns/hns_roce_qp.c

@@ -501,43 +501,10 @@ static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev,
 	return bt_pg_shift - PAGE_SHIFT;
 }
 
-static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
-				       struct ib_qp_cap *cap,
-				       struct hns_roce_qp *hr_qp)
+static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
+				struct hns_roce_qp *hr_qp)
 {
 	struct device *dev = hr_dev->dev;
-	u32 page_size;
-	u32 max_cnt;
-	int size;
-
-	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
-	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
-	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
-		dev_err(dev, "SQ WR or sge or inline data error!\n");
-		return -EINVAL;
-	}
-
-	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
-	hr_qp->sq_max_wqes_per_wr = 1;
-	hr_qp->sq_spare_wqes = 0;
-
-	if (hr_dev->caps.min_wqes)
-		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
-	else
-		max_cnt = cap->max_send_wr;
-
-	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
-	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
-		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
-		return -EINVAL;
-	}
-
-	/* Get data_seg numbers */
-	max_cnt = max(1U, cap->max_send_sge);
-	if (hr_dev->caps.max_sq_sg <= 2)
-		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
-	else
-		hr_qp->sq.max_gs = max_cnt;
 
 	if (hr_qp->sq.max_gs > 2) {
 		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
@@ -560,6 +527,52 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 		}
 	}
 
+	return 0;
+}
+
+static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
+				       struct ib_qp_cap *cap,
+				       struct hns_roce_qp *hr_qp)
+{
+	struct device *dev = hr_dev->dev;
+	u32 page_size;
+	u32 max_cnt;
+	int size;
+	int ret;
+
+	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
+	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
+	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
+		dev_err(dev, "SQ WR or sge or inline data error!\n");
+		return -EINVAL;
+	}
+
+	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
+
+	if (hr_dev->caps.min_wqes)
+		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
+	else
+		max_cnt = cap->max_send_wr;
+
+	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
+	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
+		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
+		return -EINVAL;
+	}
+
+	/* Get data_seg numbers */
+	max_cnt = max(1U, cap->max_send_sge);
+	if (hr_dev->caps.max_sq_sg <= 2)
+		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+	else
+		hr_qp->sq.max_gs = max_cnt;
+
+	ret = set_extend_sge_param(hr_dev, hr_qp);
+	if (ret) {
+		dev_err(dev, "set extend sge parameters fail\n");
+		return ret;
+	}
+
 	/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
 	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
 	hr_qp->sq.offset = 0;
@@ -942,11 +955,13 @@ err_db:
 	hns_roce_free_db(hr_dev, &hr_qp->rdb);
 
 err_rq_sge_list:
-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+	     hns_roce_qp_has_rq(init_attr))
 		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
 
 err_wqe_list:
-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+	     hns_roce_qp_has_rq(init_attr))
 		kfree(hr_qp->rq_inl_buf.wqe_list);
 
 err_out:
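
The final hunk ties the error-path kfree() calls to the same pair of conditions that gate allocation of the inline-RQ buffers. Below is a hedged standalone sketch of that pairing; the sketch_* names, the flag value, and the bool has_rq parameter are hypothetical stand-ins for HNS_ROCE_CAP_FLAG_RQ_INLINE and hns_roce_qp_has_rq(), and calloc()/free() stand in for the kernel allocators.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for HNS_ROCE_CAP_FLAG_RQ_INLINE (value is illustrative). */
#define SKETCH_CAP_RQ_INLINE	(1u << 0)

struct sketch_wqe { void **sg_list; };
struct sketch_qp  { struct sketch_wqe *wqe_list; };

/*
 * The inline-RQ buffers are only allocated when the capability flag
 * is set AND the QP actually has an RQ, so the error path must test
 * the same two conditions before freeing; freeing on the flag alone
 * would touch memory that was never allocated.
 */
static bool rq_inline_used(unsigned int caps_flags, bool qp_has_rq)
{
	return (caps_flags & SKETCH_CAP_RQ_INLINE) && qp_has_rq;
}

static int alloc_rq_inline(struct sketch_qp *qp, unsigned int flags,
			   bool has_rq)
{
	if (!rq_inline_used(flags, has_rq))
		return 0;		/* nothing allocated, nothing to free */

	qp->wqe_list = calloc(4, sizeof(*qp->wqe_list));
	if (!qp->wqe_list)
		return -1;

	qp->wqe_list[0].sg_list = calloc(4, sizeof(void *));
	if (!qp->wqe_list[0].sg_list) {
		free(qp->wqe_list);
		return -1;
	}
	return 0;
}

static void free_rq_inline(struct sketch_qp *qp, unsigned int flags,
			   bool has_rq)
{
	/* Mirror the allocation-time test exactly, as the fixed
	 * err_rq_sge_list/err_wqe_list labels now do. */
	if (!rq_inline_used(flags, has_rq))
		return;

	free(qp->wqe_list[0].sg_list);
	free(qp->wqe_list);
}

int main(void)
{
	struct sketch_qp qp = { 0 };
	unsigned int flags = SKETCH_CAP_RQ_INLINE;

	if (alloc_rq_inline(&qp, flags, true))
		return 1;
	free_rq_inline(&qp, flags, true);

	/* With no RQ, both paths correctly become no-ops. */
	if (alloc_rq_inline(&qp, flags, false))
		return 1;
	free_rq_inline(&qp, flags, false);

	puts("ok");
	return 0;
}

The design point is symmetry: whatever predicate decides allocation must also be the predicate that decides cleanup.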