vfio/mlx5: Manage error scenarios on tracker
Handle async error events and health/recovery flow to safely stop the tracker upon error scenarios. Signed-off-by: Yishai Hadas <yishaih@nvidia.com> Link: https://lore.kernel.org/r/20220908183448.195262-10-yishaih@nvidia.com Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
This commit is contained in:
parent
1047797e8e
commit
e295738756
|
@ -70,6 +70,13 @@ int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Put the dirty-page tracker into its error state and wake any waiter
 * blocked on tracker_comp so it can observe the error and stop, instead
 * of sleeping forever on a completion that would otherwise never fire.
 */
static void set_tracker_error(struct mlx5vf_pci_core_device *mvdev)
{
	/* Mark the tracker under an error and wake it up if it's running */
	mvdev->tracker.is_err = true;
	complete(&mvdev->tracker_comp);
}
|
||||
|
||||
static int mlx5fv_vf_event(struct notifier_block *nb,
|
||||
unsigned long event, void *data)
|
||||
{
|
||||
|
@ -100,6 +107,8 @@ void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
|
|||
if (!mvdev->migrate_cap)
|
||||
return;
|
||||
|
||||
/* Must be done outside the lock to let it progress */
|
||||
set_tracker_error(mvdev);
|
||||
mutex_lock(&mvdev->state_mutex);
|
||||
mlx5vf_disable_fds(mvdev);
|
||||
_mlx5vf_free_page_tracker_resources(mvdev);
|
||||
|
@ -619,6 +628,47 @@ static void mlx5vf_destroy_cq(struct mlx5_core_dev *mdev,
|
|||
mlx5_db_free(mdev, &cq->db);
|
||||
}
|
||||
|
||||
/*
 * CQ async-event callback for the tracker CQ.
 *
 * Only MLX5_EVENT_TYPE_CQ_ERROR is acted upon: a CQ error is fatal for
 * dirty-page reporting, so the owning device's tracker is flagged as
 * errored (which also wakes any waiter).  All other event types are
 * ignored.
 */
static void mlx5vf_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	if (type != MLX5_EVENT_TYPE_CQ_ERROR)
		return;

	set_tracker_error(container_of(mcq, struct mlx5vf_pci_core_device,
				       tracker.cq.mcq));
}
|
||||
|
||||
static int mlx5vf_event_notifier(struct notifier_block *nb, unsigned long type,
|
||||
void *data)
|
||||
{
|
||||
struct mlx5_vhca_page_tracker *tracker =
|
||||
mlx5_nb_cof(nb, struct mlx5_vhca_page_tracker, nb);
|
||||
struct mlx5vf_pci_core_device *mvdev = container_of(
|
||||
tracker, struct mlx5vf_pci_core_device, tracker);
|
||||
struct mlx5_eqe *eqe = data;
|
||||
u8 event_type = (u8)type;
|
||||
u8 queue_type;
|
||||
int qp_num;
|
||||
|
||||
switch (event_type) {
|
||||
case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
|
||||
case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
|
||||
case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
|
||||
queue_type = eqe->data.qp_srq.type;
|
||||
if (queue_type != MLX5_EVENT_QUEUE_TYPE_QP)
|
||||
break;
|
||||
qp_num = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
|
||||
if (qp_num != tracker->host_qp->qpn &&
|
||||
qp_num != tracker->fw_qp->qpn)
|
||||
break;
|
||||
set_tracker_error(mvdev);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static void mlx5vf_cq_complete(struct mlx5_core_cq *mcq,
|
||||
struct mlx5_eqe *eqe)
|
||||
{
|
||||
|
@ -680,6 +730,7 @@ static int mlx5vf_create_cq(struct mlx5_core_dev *mdev,
|
|||
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
|
||||
mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
|
||||
cq->mcq.comp = mlx5vf_cq_complete;
|
||||
cq->mcq.event = mlx5vf_cq_event;
|
||||
err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
|
||||
if (err)
|
||||
goto err_vec;
|
||||
|
@ -1014,6 +1065,7 @@ _mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev)
|
|||
|
||||
WARN_ON(mvdev->mdev_detach);
|
||||
|
||||
mlx5_eq_notifier_unregister(mdev, &tracker->nb);
|
||||
mlx5vf_cmd_destroy_tracker(mdev, tracker->id);
|
||||
mlx5vf_destroy_qp(mdev, tracker->fw_qp);
|
||||
mlx5vf_free_qp_recv_resources(mdev, tracker->host_qp);
|
||||
|
@ -1127,6 +1179,8 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev,
|
|||
if (err)
|
||||
goto err_activate;
|
||||
|
||||
MLX5_NB_INIT(&tracker->nb, mlx5vf_event_notifier, NOTIFY_ANY);
|
||||
mlx5_eq_notifier_register(mdev, &tracker->nb);
|
||||
*page_size = host_qp->tracked_page_size;
|
||||
mvdev->log_active = true;
|
||||
mlx5vf_state_mutex_unlock(mvdev);
|
||||
|
@ -1273,7 +1327,8 @@ int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
|
|||
goto end;
|
||||
|
||||
tracker->status = MLX5_PAGE_TRACK_STATE_REPORTING;
|
||||
while (tracker->status == MLX5_PAGE_TRACK_STATE_REPORTING) {
|
||||
while (tracker->status == MLX5_PAGE_TRACK_STATE_REPORTING &&
|
||||
!tracker->is_err) {
|
||||
poll_err = mlx5vf_cq_poll_one(cq, tracker->host_qp, dirty,
|
||||
&tracker->status);
|
||||
if (poll_err == CQ_EMPTY) {
|
||||
|
@ -1294,8 +1349,10 @@ int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
|
|||
}
|
||||
|
||||
if (tracker->status == MLX5_PAGE_TRACK_STATE_ERROR)
|
||||
err = -EIO;
|
||||
tracker->is_err = true;
|
||||
|
||||
if (tracker->is_err)
|
||||
err = -EIO;
|
||||
end:
|
||||
mlx5vf_state_mutex_unlock(mvdev);
|
||||
return err;
|
||||
|
|
|
@ -82,10 +82,12 @@ struct mlx5_vhca_qp {
|
|||
/* Per-device state of the VHCA dirty-page tracker. */
struct mlx5_vhca_page_tracker {
	u32 id;		/* tracker object id; used when destroying the tracker */
	u32 pdn;	/* NOTE(review): presumably the protection domain number — confirm */
	u8 is_err:1;	/* fatal error seen; readers abort with -EIO */
	struct mlx5_uars_page *uar;	/* NOTE(review): looks like the UAR page for doorbells — confirm */
	struct mlx5_vhca_cq cq;		/* CQ polled for dirty-page report completions */
	struct mlx5_vhca_qp *host_qp;	/* tracker QP; WQ errors on its qpn flag is_err */
	struct mlx5_vhca_qp *fw_qp;	/* firmware-side QP; WQ errors on its qpn flag is_err */
	struct mlx5_nb nb;	/* async EQ notifier (mlx5vf_event_notifier) */
	int status;	/* MLX5_PAGE_TRACK_STATE_* reporting state */
};
|
||||
|
||||
|
|
Loading…
Reference in New Issue