RDMA/hns: Use IDA interface to manage srq index
Switch srq index allocation and release from hns' own bitmap interface
to IDA interface.

Link: https://lore.kernel.org/r/1629336980-17499-3-git-send-email-liangwenpeng@huawei.com
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit c4f11b36f8
parent 8feafd9017
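The conversion follows the plain kernel IDA API rather than the driver's private bitmap helpers. The sketch below is not part of the patch; it is a minimal, standalone illustration (names invented) of the pattern used throughout this series: ida_alloc_range() hands out a free integer in [min, max] inclusive or returns a negative errno, ida_free() returns it, and ida_destroy() releases the IDA's internal storage.

#include <linux/idr.h>
#include <linux/gfp.h>

/* Illustrative only -- not part of the hns driver. */
static DEFINE_IDA(example_ida);

static int example_use_ida(unsigned int min, unsigned int max)
{
        int id;

        /* Grab a free index in [min, max]; may sleep with GFP_KERNEL. */
        id = ida_alloc_range(&example_ida, min, max, GFP_KERNEL);
        if (id < 0)
                return id;      /* -ENOMEM or -ENOSPC */

        /* ... use the index ... */

        /* Return the index so it can be handed out again. */
        ida_free(&example_ida, id);
        return 0;
}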
@@ -248,7 +248,7 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
         ida_destroy(&hr_dev->xrcd_ida.ida);
 
         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
-                hns_roce_cleanup_srq_table(hr_dev);
+                ida_destroy(&hr_dev->srq_table.srq_ida.ida);
         hns_roce_cleanup_qp_table(hr_dev);
         hns_roce_cleanup_cq_table(hr_dev);
         ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
@@ -514,7 +514,7 @@ struct hns_roce_cq_table {
 };
 
 struct hns_roce_srq_table {
-        struct hns_roce_bitmap bitmap;
+        struct hns_roce_ida srq_ida;
         struct xarray xa;
         struct hns_roce_hem_table table;
 };
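For reference, the struct hns_roce_ida wrapper that replaces the bitmap member was introduced earlier in this series in hns_roce_device.h; its layout is roughly:

struct hns_roce_ida {
        struct ida ida;
        u32 min; /* Lowest ID to allocate.  */
        u32 max; /* Highest ID to allocate. */
};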
@@ -1145,13 +1145,12 @@ void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
 void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
 void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
-int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
 
 void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
-void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev);
 
 int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
 void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
@@ -758,26 +758,11 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
         hns_roce_init_qp_table(hr_dev);
 
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
-                ret = hns_roce_init_srq_table(hr_dev);
-                if (ret) {
-                        dev_err(dev,
-                                "Failed to init share receive queue table.\n");
-                        goto err_qp_table_free;
-                }
-        }
+        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+                hns_roce_init_srq_table(hr_dev);
 
         return 0;
 
-err_qp_table_free:
-        hns_roce_cleanup_qp_table(hr_dev);
-        hns_roce_cleanup_cq_table(hr_dev);
-        ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
-
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
-                ida_destroy(&hr_dev->xrcd_ida.ida);
-
-        ida_destroy(&hr_dev->pd_ida.ida);
-
 err_uar_table_free:
         ida_destroy(&hr_dev->uar_ida.ida);
         return ret;
@@ -80,15 +80,19 @@ static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
 static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 {
         struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+        struct hns_roce_ida *srq_ida = &hr_dev->srq_table.srq_ida;
         struct ib_device *ibdev = &hr_dev->ib_dev;
         struct hns_roce_cmd_mailbox *mailbox;
         int ret;
+        int id;
 
-        ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
-        if (ret) {
-                ibdev_err(ibdev, "failed to alloc SRQ number.\n");
+        id = ida_alloc_range(&srq_ida->ida, srq_ida->min, srq_ida->max,
+                             GFP_KERNEL);
+        if (id < 0) {
+                ibdev_err(ibdev, "failed to alloc srq(%d).\n", id);
                 return -ENOMEM;
         }
+        srq->srqn = (unsigned long)id;
 
         ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
         if (ret) {
@@ -132,7 +136,7 @@ err_xa:
 err_put:
         hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
 err_out:
-        hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn);
+        ida_free(&srq_ida->ida, id);
 
         return ret;
 }
@@ -154,7 +158,7 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
         wait_for_completion(&srq->free);
 
         hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
-        hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn);
+        ida_free(&srq_table->srq_ida.ida, (int)srq->srqn);
 }
 
 static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
@@ -440,18 +444,14 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
         return 0;
 }
 
-int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
+void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
 {
         struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+        struct hns_roce_ida *srq_ida = &srq_table->srq_ida;
 
         xa_init(&srq_table->xa);
 
-        return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
-                                    hr_dev->caps.num_srqs - 1,
-                                    hr_dev->caps.reserved_srqs, 0);
-}
-
-void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
-{
-        hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
+        ida_init(&srq_ida->ida);
+        srq_ida->max = hr_dev->caps.num_srqs - 1;
+        srq_ida->min = hr_dev->caps.reserved_srqs;
 }
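A note on the signature change in the last hunk: ida_init() only initializes the structure and cannot fail, unlike the old hns_roce_bitmap_init(), which had to allocate the bitmap storage and could return an error. That is why hns_roce_init_srq_table() becomes void, the error path in hns_roce_setup_hca() disappears, and cleanup shrinks to the single ida_destroy() call folded into hns_roce_cleanup_bitmap() above.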