RDMA: Update workqueue usage

* ib_wq is added, which is used as the common workqueue for infiniband
  instead of the system workqueue.  All system workqueue usages
  including flush_scheduled_work() callers are converted to use and
  flush ib_wq.

* cancel_delayed_work() + flush_scheduled_work() converted to
  cancel_delayed_work_sync().

* qib_wq is removed and ib_wq is used instead.

This is to prepare for deprecation of flush_scheduled_work().

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 948579cd8c
commit f06267104d
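For context, these are the two conversion patterns the diff below applies throughout, shown as a minimal illustrative sketch (the my_work/my_dwork items are hypothetical stand-ins, not code from the patch):

#include <linux/workqueue.h>

extern struct workqueue_struct *ib_wq;  /* provided by ib_core after this commit */

static struct work_struct my_work;      /* hypothetical work item */
static struct delayed_work my_dwork;    /* hypothetical delayed work item */

static void conversion_sketch(void)
{
        /* Before: work items shared the system workqueue. */
        schedule_work(&my_work);
        /* After: work items go to the InfiniBand-private ib_wq, so
         * drivers can flush it without waiting on unrelated work. */
        queue_work(ib_wq, &my_work);

        /* Before: cancel, then flush the entire system workqueue to
         * be sure the handler has finished running. */
        cancel_delayed_work(&my_dwork);
        flush_scheduled_work();
        /* After: one call that cancels and waits for this item only. */
        cancel_delayed_work_sync(&my_dwork);
}

On the removal paths, flush_scheduled_work() likewise becomes flush_workqueue(ib_wq).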
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -308,7 +308,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
                         INIT_WORK(&work->work, ib_cache_task);
                         work->device   = event->device;
                         work->port_num = event->element.port_num;
-                        schedule_work(&work->work);
+                        queue_work(ib_wq, &work->work);
                 }
         }
 }
@@ -368,7 +368,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
         int p;
 
         ib_unregister_event_handler(&device->cache.event_handler);
-        flush_scheduled_work();
+        flush_workqueue(ib_wq);
 
         for (p = 0; p <= end_port(device) - start_port(device); ++p) {
                 kfree(device->cache.pkey_cache[p]);
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -38,7 +38,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 
 #include "core_priv.h"
 
@@ -52,6 +51,9 @@ struct ib_client_data {
         void *            data;
 };
 
+struct workqueue_struct *ib_wq;
+EXPORT_SYMBOL_GPL(ib_wq);
+
 static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);
 
@@ -718,6 +720,10 @@ static int __init ib_core_init(void)
 {
         int ret;
 
+        ib_wq = alloc_workqueue("infiniband", 0, 0);
+        if (!ib_wq)
+                return -ENOMEM;
+
         ret = ib_sysfs_setup();
         if (ret)
                 printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
@@ -726,6 +732,7 @@ static int __init ib_core_init(void)
         if (ret) {
                 printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
                 ib_sysfs_cleanup();
+                destroy_workqueue(ib_wq);
         }
 
         return ret;
@@ -736,7 +743,7 @@ static void __exit ib_core_cleanup(void)
         ib_cache_cleanup();
         ib_sysfs_cleanup();
         /* Make sure that any pending umem accounting work is done. */
-        flush_scheduled_work();
+        destroy_workqueue(ib_wq);
 }
 
 module_init(ib_core_init);
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -425,7 +425,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
                 port->sm_ah = NULL;
                 spin_unlock_irqrestore(&port->ah_lock, flags);
 
-                schedule_work(&sa_dev->port[event->element.port_num -
+                queue_work(ib_wq, &sa_dev->port[event->element.port_num -
                             sa_dev->start_port].update_task);
         }
 }
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -262,7 +262,7 @@ void ib_umem_release(struct ib_umem *umem)
                         umem->mm   = mm;
                         umem->diff = diff;
 
-                        schedule_work(&umem->work);
+                        queue_work(ib_wq, &umem->work);
                         return;
                 }
         } else
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -755,7 +755,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
          */
         ipath_shutdown_device(dd);
 
-        flush_scheduled_work();
+        flush_workqueue(ib_wq);
 
         if (dd->verbs_dev)
                 ipath_unregister_ib_device(dd->verbs_dev);
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -220,7 +220,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
         work->mm = mm;
         work->num_pages = num_pages;
 
-        schedule_work(&work->work);
+        queue_work(ib_wq, &work->work);
         return;
 
 bail_mm:
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1692,8 +1692,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
         ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
         wake_up(&ppd->cpspec->autoneg_wait);
-        cancel_delayed_work(&ppd->cpspec->autoneg_work);
-        flush_scheduled_work();
+        cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 
         shutdown_7220_relock_poll(ppd->dd);
         val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -3515,8 +3514,8 @@ static void try_7220_autoneg(struct qib_pportdata *ppd)
 
         toggle_7220_rclkrls(ppd->dd);
         /* 2 msec is minimum length of a poll cycle */
-        schedule_delayed_work(&ppd->cpspec->autoneg_work,
-                              msecs_to_jiffies(2));
+        queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+                           msecs_to_jiffies(2));
 }
 
 /*
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2406,10 +2406,9 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
         ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
         spin_unlock_irqrestore(&ppd->lflags_lock, flags);
         wake_up(&ppd->cpspec->autoneg_wait);
-        cancel_delayed_work(&ppd->cpspec->autoneg_work);
+        cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
         if (ppd->dd->cspec->r1)
-                cancel_delayed_work(&ppd->cpspec->ipg_work);
-        flush_scheduled_work();
+                cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
 
         ppd->cpspec->chase_end = 0;
         if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2706,7 +2705,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
                 if (!(pins & mask)) {
                         ++handled;
                         qd->t_insert = get_jiffies_64();
-                        schedule_work(&qd->work);
+                        queue_work(ib_wq, &qd->work);
                 }
         }
 }
@@ -4990,8 +4989,8 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
         set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
         qib_7322_mini_pcs_reset(ppd);
         /* 2 msec is minimum length of a poll cycle */
-        schedule_delayed_work(&ppd->cpspec->autoneg_work,
-                              msecs_to_jiffies(2));
+        queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+                           msecs_to_jiffies(2));
 }
 
 /*
@@ -5121,7 +5120,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
         ib_free_send_mad(send_buf);
 retry:
         delay = 2 << ppd->cpspec->ipg_tries;
-        schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+        queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
+                           msecs_to_jiffies(delay));
 }
 
 /*
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
-struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;
 
 static void verify_interrupt(unsigned long);
@@ -1044,24 +1043,10 @@ static int __init qlogic_ib_init(void)
         if (ret)
                 goto bail;
 
-        /*
-         * We create our own workqueue mainly because we want to be
-         * able to flush it when devices are being removed. We can't
-         * use schedule_work()/flush_scheduled_work() because both
-         * unregister_netdev() and linkwatch_event take the rtnl lock,
-         * so flush_scheduled_work() can deadlock during device
-         * removal.
-         */
-        qib_wq = create_workqueue("qib");
-        if (!qib_wq) {
-                ret = -ENOMEM;
-                goto bail_dev;
-        }
-
         qib_cq_wq = create_singlethread_workqueue("qib_cq");
         if (!qib_cq_wq) {
                 ret = -ENOMEM;
-                goto bail_wq;
+                goto bail_dev;
         }
 
         /*
@@ -1091,8 +1076,6 @@ bail_unit:
         idr_destroy(&qib_unit_table);
 bail_cq_wq:
         destroy_workqueue(qib_cq_wq);
-bail_wq:
-        destroy_workqueue(qib_wq);
 bail_dev:
         qib_dev_cleanup();
 bail:
@@ -1116,7 +1099,6 @@ static void __exit qlogic_ib_cleanup(void)
 
         pci_unregister_driver(&qib_driver);
 
-        destroy_workqueue(qib_wq);
         destroy_workqueue(qib_cq_wq);
 
         qib_cpulist_count = 0;
@@ -1289,7 +1271,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 
         if (qib_mini_init || initfail || ret) {
                 qib_stop_timers(dd);
-                flush_scheduled_work();
+                flush_workqueue(ib_wq);
                 for (pidx = 0; pidx < dd->num_pports; ++pidx)
                         dd->f_quiet_serdes(dd->pport + pidx);
                 if (qib_mini_init)
@@ -1338,8 +1320,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev)
 
         qib_stop_timers(dd);
 
-        /* wait until all of our (qsfp) schedule_work() calls complete */
-        flush_scheduled_work();
+        /* wait until all of our (qsfp) queue_work() calls complete */
+        flush_workqueue(ib_wq);
 
         ret = qibfs_remove(dd);
         if (ret)
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -485,7 +485,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
                 goto bail;
         /* We see a module, but it may be unwise to look yet. Just schedule */
         qd->t_insert = get_jiffies_64();
-        schedule_work(&qd->work);
+        queue_work(ib_wq, &qd->work);
 bail:
         return;
 }
@@ -493,10 +493,9 @@ bail:
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
 {
         /*
-         * There is nothing to do here for now. our
-         * work is scheduled with schedule_work(), and
-         * flush_scheduled_work() from remove_one will
-         * block until all work ssetup with schedule_work()
+         * There is nothing to do here for now. our work is scheduled
+         * with queue_work(), and flush_workqueue() from remove_one
+         * will block until all work setup with queue_work()
          * completes.
          */
 }
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -805,7 +805,6 @@ static inline int qib_send_ok(struct qib_qp *qp)
                  !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
 }
 
-extern struct workqueue_struct *qib_wq;
 extern struct workqueue_struct *qib_cq_wq;
 
 /*
@@ -814,7 +813,7 @@ extern struct workqueue_struct *qib_cq_wq;
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
         if (qib_send_ok(qp))
-                queue_work(qib_wq, &qp->s_work);
+                queue_work(ib_wq, &qp->s_work);
 }
 
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -638,7 +638,7 @@ err:
         if (target->state == SRP_TARGET_CONNECTING) {
                 target->state = SRP_TARGET_DEAD;
                 INIT_WORK(&target->work, srp_remove_work);
-                schedule_work(&target->work);
+                queue_work(ib_wq, &target->work);
         }
         spin_unlock_irq(&target->lock);
 
@@ -2199,7 +2199,7 @@ static void srp_remove_one(struct ib_device *device)
                  * started before we marked our target ports as
                  * removed, and any target port removal tasks.
                  */
-                flush_scheduled_work();
+                flush_workqueue(ib_wq);
 
                 list_for_each_entry_safe(target, tmp_target,
                                          &host->target_list, list) {
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -47,10 +47,13 @@
 #include <linux/list.h>
 #include <linux/rwsem.h>
 #include <linux/scatterlist.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 
+extern struct workqueue_struct *ib_wq;
+
 union ib_gid {
         u8      raw[16];
         struct {