RDMA/mlx5: Get XRCD number directly for the internal use
The mlx5_ib driver creates an XRC domain and uses it for creating its
internal SRQs. However, all that is needed is the XRCD number, not a
full-blown ib_xrcd object. Update the code to get and store only the
number.

Link: https://lore.kernel.org/r/20200706122716.647338-2-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit f4375443b7
parent 42a3b15396
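For context, the shape of the change can be modelled in plain C. The following is a minimal user-space sketch, not the driver code: fake_cmd_xrcd_alloc and struct dev_resources are invented stand-ins for mlx5_cmd_xrcd_alloc() and struct mlx5_ib_resources, illustrating that once the firmware hands back an XRCD number, a bare u32 is all the internal users need to keep.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the firmware command that returns an XRC
 * domain number; the real driver calls mlx5_cmd_xrcd_alloc() instead. */
static int fake_cmd_xrcd_alloc(uint32_t *xrcdn)
{
	static uint32_t next_xrcdn = 1;

	*xrcdn = next_xrcdn++;
	return 0;
}

/* After this patch the internal resources keep only the numbers
 * (mirroring the new xrcdn0/xrcdn1 fields), not ib_xrcd objects. */
struct dev_resources {
	uint32_t xrcdn0;
	uint32_t xrcdn1;
};

int main(void)
{
	struct dev_resources devr;

	if (fake_cmd_xrcd_alloc(&devr.xrcdn0) ||
	    fake_cmd_xrcd_alloc(&devr.xrcdn1))
		return 1;

	/* Internal consumers (SRQ/QP setup) only ever need the raw number. */
	printf("xrcdn0=%" PRIu32 " xrcdn1=%" PRIu32 "\n",
	       devr.xrcdn0, devr.xrcdn1);
	return 0;
}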
drivers/infiniband/hw/mlx5/main.c

@@ -5003,6 +5003,9 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
 	dev = container_of(devr, struct mlx5_ib_dev, devr);
 	ibdev = &dev->ib_dev;
 
+	if (!MLX5_CAP_GEN(dev->mdev, xrc))
+		return -EOPNOTSUPP;
+
 	mutex_init(&devr->mutex);
 
 	devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
@@ -5030,34 +5033,19 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
 	if (ret)
 		goto err_create_cq;
 
-	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
-	if (IS_ERR(devr->x0)) {
-		ret = PTR_ERR(devr->x0);
+	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
+	if (ret)
 		goto error2;
-	}
-	devr->x0->device = &dev->ib_dev;
-	devr->x0->inode = NULL;
-	atomic_set(&devr->x0->usecnt, 0);
-	mutex_init(&devr->x0->tgt_qp_mutex);
-	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
 
-	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
-	if (IS_ERR(devr->x1)) {
-		ret = PTR_ERR(devr->x1);
+	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
+	if (ret)
 		goto error3;
-	}
-	devr->x1->device = &dev->ib_dev;
-	devr->x1->inode = NULL;
-	atomic_set(&devr->x1->usecnt, 0);
-	mutex_init(&devr->x1->tgt_qp_mutex);
-	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
 
 	memset(&attr, 0, sizeof(attr));
 	attr.attr.max_sge = 1;
 	attr.attr.max_wr = 1;
 	attr.srq_type = IB_SRQT_XRC;
 	attr.ext.cq = devr->c0;
-	attr.ext.xrc.xrcd = devr->x0;
 
 	devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
 	if (!devr->s0) {
@@ -5068,13 +5056,11 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
 	devr->s0->device = &dev->ib_dev;
 	devr->s0->pd = devr->p0;
 	devr->s0->srq_type = IB_SRQT_XRC;
-	devr->s0->ext.xrc.xrcd = devr->x0;
 	devr->s0->ext.cq = devr->c0;
 	ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
 	if (ret)
 		goto err_create;
 
-	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
 	atomic_inc(&devr->s0->ext.cq->usecnt);
 	atomic_inc(&devr->p0->usecnt);
 	atomic_set(&devr->s0->usecnt, 0);
@@ -5116,9 +5102,9 @@ error5:
 err_create:
 	kfree(devr->s0);
 error4:
-	mlx5_ib_dealloc_xrcd(devr->x1, NULL);
+	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
 error3:
-	mlx5_ib_dealloc_xrcd(devr->x0, NULL);
+	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
 error2:
 	mlx5_ib_destroy_cq(devr->c0, NULL);
 err_create_cq:
@@ -5132,14 +5118,17 @@ error0:
 
 static void destroy_dev_resources(struct mlx5_ib_resources *devr)
 {
+	struct mlx5_ib_dev *dev;
 	int port;
 
+	dev = container_of(devr, struct mlx5_ib_dev, devr);
+
 	mlx5_ib_destroy_srq(devr->s1, NULL);
 	kfree(devr->s1);
 	mlx5_ib_destroy_srq(devr->s0, NULL);
 	kfree(devr->s0);
-	mlx5_ib_dealloc_xrcd(devr->x0, NULL);
-	mlx5_ib_dealloc_xrcd(devr->x1, NULL);
+	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
+	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
 	mlx5_ib_destroy_cq(devr->c0, NULL);
 	kfree(devr->c0);
 	mlx5_ib_dealloc_pd(devr->p0, NULL);
drivers/infiniband/hw/mlx5/mlx5_ib.h

@@ -730,8 +730,8 @@ struct mlx5_ib_port_resources {
 
 struct mlx5_ib_resources {
 	struct ib_cq *c0;
-	struct ib_xrcd *x0;
-	struct ib_xrcd *x1;
+	u32 xrcdn0;
+	u32 xrcdn1;
 	struct ib_pd *p0;
 	struct ib_srq *s0;
 	struct ib_srq *s1;
drivers/infiniband/hw/mlx5/qp.c

@@ -2035,15 +2035,15 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	switch (init_attr->qp_type) {
 	case IB_QPT_XRC_INI:
 		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
-		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+		MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
 		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
 		break;
 	default:
 		if (init_attr->srq) {
-			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
+			MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
 			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
 		} else {
-			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+			MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
 			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
 		}
 	}
@@ -2183,11 +2183,11 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		MLX5_SET(qpc, qpc, no_sq, 1);
 
 		if (attr->srq) {
-			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
+			MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
 			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
 				 to_msrq(attr->srq)->msrq.srqn);
 		} else {
-			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+			MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
 			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
 				 to_msrq(devr->s1)->msrq.srqn);
 		}
drivers/infiniband/hw/mlx5/srq.c

@@ -274,10 +274,10 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
 	if (srq->wq_sig)
 		in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
 
-	if (init_attr->srq_type == IB_SRQT_XRC)
+	if (init_attr->srq_type == IB_SRQT_XRC && init_attr->ext.xrc.xrcd)
 		in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
 	else
-		in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;
+		in.xrcd = dev->devr.xrcdn0;
 
 	if (init_attr->srq_type == IB_SRQT_TM) {
 		in.tm_log_list_size =