IB/mlx5: Fix long EEH recover time with NVMe offloads
On an NVMe offloads connection with many IO queues, EEH takes a long time to
recover. The culprit is the synchronize_srcu() in destroy_mkey(), which waits
for an SRCU grace period on every mkey destruction. Only ODP mkeys can be in
use by the SRCU-protected page fault handlers, so call synchronize_srcu()
only when destroying an ODP mkey.
Fixes: b4cfe447d4 ("IB/mlx5: Implement on demand paging by adding support for MMU notifiers")
Signed-off-by: Huy Nguyen <huyn@mellanox.com>
Reviewed-by: Daniel Jurgens <danielj@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit bb7e22a8ab
parent 842a9c837e
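For readers skimming the diff below: the fix tracks, per teardown batch, whether
any mkey in the batch is ODP-backed, and pays the SRCU grace-period wait once at
the end, only when an ODP mkey was actually present. Here is a minimal userspace
sketch of that gating pattern; struct mkey and wait_for_readers() are stand-ins
invented for illustration, not the kernel's types or the real synchronize_srcu():

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a cached mkey; is_odp models mr->umem->is_odp. */
struct mkey {
        bool is_odp;
};

/* Stand-in for synchronize_srcu(&dev->mr_srcu): an expensive wait for
 * all current SRCU readers (the ODP page fault handlers) to finish. */
static void wait_for_readers(void)
{
        puts("waiting for SRCU readers (expensive, done at most once)");
}

/* Destroy a batch of keys; wait for readers only if the batch held an
 * ODP-backed key, mirroring remove_keys()/clean_keys() after the patch. */
static void destroy_batch(const struct mkey *keys, int n)
{
        bool odp_mkey_exist = false;

        for (int i = 0; i < n; i++) {
                if (keys[i].is_odp)
                        odp_mkey_exist = true;
                /* ... destroy keys[i] ... */
        }

        if (odp_mkey_exist)
                wait_for_readers(); /* one grace period for the whole batch */
}

int main(void)
{
        const struct mkey batch[] = { { false }, { true }, { false } };

        destroy_batch(batch, 3); /* prints once, not three times */
        return 0;
}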
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -73,6 +73,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	/* Wait until all page fault handlers using the mr complete. */
-	synchronize_srcu(&dev->mr_srcu);
+	if (mr->umem && mr->umem->is_odp)
+		synchronize_srcu(&dev->mr_srcu);
 #endif
 
@@ -237,6 +238,9 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	bool odp_mkey_exist = false;
+#endif
 	struct mlx5_ib_mr *tmp_mr;
 	struct mlx5_ib_mr *mr;
 	LIST_HEAD(del_list);
@@ -249,6 +253,10 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 			break;
 		}
 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+		if (mr->umem && mr->umem->is_odp)
+			odp_mkey_exist = true;
+#endif
 		list_move(&mr->list, &del_list);
 		ent->cur--;
 		ent->size--;
@@ -257,6 +265,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 	}
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	synchronize_srcu(&dev->mr_srcu);
+	if (odp_mkey_exist)
+		synchronize_srcu(&dev->mr_srcu);
 #endif
 
@@ -572,6 +581,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
+	bool odp_mkey_exist = false;
 	struct mlx5_ib_mr *tmp_mr;
 	struct mlx5_ib_mr *mr;
 	LIST_HEAD(del_list);
@@ -584,6 +594,8 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 			break;
 		}
 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
+		if (mr->umem && mr->umem->is_odp)
+			odp_mkey_exist = true;
 		list_move(&mr->list, &del_list);
 		ent->cur--;
 		ent->size--;
@@ -592,6 +604,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 	}
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	synchronize_srcu(&dev->mr_srcu);
+	if (odp_mkey_exist)
+		synchronize_srcu(&dev->mr_srcu);
 #endif
 