IB/mlx5: Fix the locking of SRQ objects in ODP events

QP and SRQ objects are stored in different containers, so the code that gets
and locks a common resource during an ODP event needs to account for that.

While here, get rid of the 'refcount' and 'free' fields in the mlx5_core_srq
struct and use the fields with the same semantics in the common structure.

Fixes: 032080ab43 ("IB/mlx5: Lock QP during page fault handling")
Signed-off-by: Moni Shoua <monis@mellanox.com>
Reviewed-by: Majd Dibbiny <majd@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
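
For context, the teardown scheme this patch converts the SRQ to relies on the
reference count and completion that live in mlx5_core_rsc_common: every holder
of the object takes a reference, the last put completes 'free', and the destroy
path waits on it before releasing the object. The sketch below is illustrative
only and is not code from this patch or from the mlx5 driver; the struct and the
sketch_* helpers are made up, while atomic_set(), atomic_dec_and_test(),
init_completion(), complete() and wait_for_completion() are the kernel
primitives the patch uses.

    /*
     * Illustrative sketch of the hold/put scheme behind mlx5_core_res_put():
     * the object embeds a refcount and a completion; the last put signals
     * the completion so a destroyer blocked in wait_for_completion() can
     * proceed. Names prefixed with "sketch_" are hypothetical.
     */
    #include <linux/atomic.h>
    #include <linux/completion.h>

    struct sketch_rsc_common {
    	atomic_t refcount;
    	struct completion free;
    };

    static void sketch_rsc_init(struct sketch_rsc_common *c)
    {
    	atomic_set(&c->refcount, 1);	/* creator holds the first reference */
    	init_completion(&c->free);
    }

    static void sketch_rsc_put(struct sketch_rsc_common *c)
    {
    	/* Last reference gone: let the destroy path finish tearing down. */
    	if (atomic_dec_and_test(&c->refcount))
    		complete(&c->free);
    }

    static void sketch_rsc_destroy(struct sketch_rsc_common *c)
    {
    	sketch_rsc_put(c);		/* drop the creator's reference */
    	wait_for_completion(&c->free);	/* wait for in-flight users, e.g. ODP */
    }

With both QP and SRQ exposing this shared structure, the page-fault path can
hold either object type through the same interface and release it with a single
put, which is why odp_get_rsc() below returns a mlx5_core_rsc_common pointer in
both cases.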

@@ -187,8 +187,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
 			wc->wr_id = srq->wrid[wqe_ctr];
 			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
-			if (msrq && atomic_dec_and_test(&msrq->refcount))
-				complete(&msrq->free);
+			if (msrq)
+				mlx5_core_res_put(&msrq->common);
 		}
 	} else {
 		wq = &qp->rq;

@@ -1115,22 +1115,25 @@ invalid_transport_or_opcode:
 static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
 							u32 wq_num, int pf_type)
 {
-	enum mlx5_res_type res_type;
+	struct mlx5_core_rsc_common *common = NULL;
+	struct mlx5_core_srq *srq;
 
 	switch (pf_type) {
 	case MLX5_WQE_PF_TYPE_RMP:
-		res_type = MLX5_RES_SRQ;
+		srq = mlx5_cmd_get_srq(dev, wq_num);
+		if (srq)
+			common = &srq->common;
 		break;
 	case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
 	case MLX5_WQE_PF_TYPE_RESP:
 	case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
-		res_type = MLX5_RES_QP;
+		common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP);
 		break;
 	default:
-		return NULL;
+		break;
 	}
 
-	return mlx5_core_res_hold(dev->mdev, wq_num, res_type);
+	return common;
 }
 
 static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)

@@ -46,8 +46,6 @@ struct mlx5_core_srq {
 	int wqe_shift;
 	void (*event)(struct mlx5_core_srq *srq, enum mlx5_event e);
 
-	atomic_t refcount;
-	struct completion free;
 	u16 uid;
 };

@@ -87,7 +87,7 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
 	srq = radix_tree_lookup(&table->tree, srqn);
 	if (srq)
-		atomic_inc(&srq->refcount);
+		atomic_inc(&srq->common.refcount);
 
 	spin_unlock(&table->lock);
@@ -594,8 +594,8 @@ int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
 	if (err)
 		return err;
 
-	atomic_set(&srq->refcount, 1);
-	init_completion(&srq->free);
+	atomic_set(&srq->common.refcount, 1);
+	init_completion(&srq->common.free);
 
 	spin_lock_irq(&table->lock);
 	err = radix_tree_insert(&table->tree, srq->srqn, srq);
@@ -627,9 +627,8 @@ int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
 	if (err)
 		return err;
 
-	if (atomic_dec_and_test(&srq->refcount))
-		complete(&srq->free);
-	wait_for_completion(&srq->free);
+	mlx5_core_res_put(&srq->common);
+	wait_for_completion(&srq->common.free);
 
 	return 0;
 }
@@ -685,7 +684,7 @@ static int srq_event_notifier(struct notifier_block *nb,
 	srq = radix_tree_lookup(&table->tree, srqn);
 	if (srq)
-		atomic_inc(&srq->refcount);
+		atomic_inc(&srq->common.refcount);
 
 	spin_unlock(&table->lock);
@@ -694,8 +693,7 @@ static int srq_event_notifier(struct notifier_block *nb,
 	srq->event(srq, eqe->type);
 
-	if (atomic_dec_and_test(&srq->refcount))
-		complete(&srq->free);
+	mlx5_core_res_put(&srq->common);
 
 	return NOTIFY_OK;
 }