RDMA/core: Avoid flush_workqueue(system_unbound_wq) usage
Flushing system-wide workqueues is dangerous and will be forbidden. Replace system_unbound_wq with a local, dedicated workqueue (ib_unreg_wq).

Link: https://lore.kernel.org/r/252cefb0-a400-83f6-2032-333d69f52c1b@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent 0f328c7034
commit ff815a8939
@@ -58,6 +58,7 @@ struct workqueue_struct *ib_comp_wq;
 struct workqueue_struct *ib_comp_unbound_wq;
 struct workqueue_struct *ib_wq;
 EXPORT_SYMBOL_GPL(ib_wq);
+static struct workqueue_struct *ib_unreg_wq;
 
 /*
  * Each of the three rwsem locks (devices, clients, client_data) protects the
@@ -1602,7 +1603,7 @@ void ib_unregister_device_queued(struct ib_device *ib_dev)
 	WARN_ON(!refcount_read(&ib_dev->refcount));
 	WARN_ON(!ib_dev->ops.dealloc_driver);
 	get_device(&ib_dev->dev);
-	if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work))
+	if (!queue_work(ib_unreg_wq, &ib_dev->unregistration_work))
 		put_device(&ib_dev->dev);
 }
 EXPORT_SYMBOL(ib_unregister_device_queued);
@@ -2751,27 +2752,28 @@ static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
 
 static int __init ib_core_init(void)
 {
-	int ret;
+	int ret = -ENOMEM;
 
 	ib_wq = alloc_workqueue("infiniband", 0, 0);
 	if (!ib_wq)
 		return -ENOMEM;
 
+	ib_unreg_wq = alloc_workqueue("ib-unreg-wq", WQ_UNBOUND,
+				      WQ_UNBOUND_MAX_ACTIVE);
+	if (!ib_unreg_wq)
+		goto err;
+
 	ib_comp_wq = alloc_workqueue("ib-comp-wq",
 			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
-	if (!ib_comp_wq) {
-		ret = -ENOMEM;
-		goto err;
-	}
+	if (!ib_comp_wq)
+		goto err_unbound;
 
 	ib_comp_unbound_wq =
 		alloc_workqueue("ib-comp-unb-wq",
 				WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
 				WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
-	if (!ib_comp_unbound_wq) {
-		ret = -ENOMEM;
+	if (!ib_comp_unbound_wq)
 		goto err_comp;
-	}
 
 	ret = class_register(&ib_class);
 	if (ret) {
@@ -2831,6 +2833,8 @@ err_comp_unbound:
 	destroy_workqueue(ib_comp_unbound_wq);
 err_comp:
 	destroy_workqueue(ib_comp_wq);
+err_unbound:
+	destroy_workqueue(ib_unreg_wq);
 err:
 	destroy_workqueue(ib_wq);
 	return ret;
@@ -2852,7 +2856,7 @@ static void __exit ib_core_cleanup(void)
 	destroy_workqueue(ib_comp_wq);
 	/* Make sure that any pending umem accounting work is done. */
 	destroy_workqueue(ib_wq);
-	flush_workqueue(system_unbound_wq);
+	destroy_workqueue(ib_unreg_wq);
 	WARN_ON(!xa_empty(&clients));
 	WARN_ON(!xa_empty(&devices));
 }
Loading…
Reference in New Issue