Merge branch 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue changes from Tejun Heo:
 "This is workqueue updates for v3.7-rc1. A lot of activities this round including considerable API and behavior cleanups.

  * delayed_work combines a timer and a work item. The handling of the timer part has always been a bit clunky leading to confusing cancelation API with weird corner-case behaviors. delayed_work is updated to use new IRQ safe timer and cancelation now works as expected.

  * Another deficiency of delayed_work was lack of the counterpart of mod_timer() which led to cancel+queue combinations or open-coded timer+work usages. mod_delayed_work[_on]() are added.

    These two delayed_work changes make delayed_work provide interface and behave like timer which is executed with process context.

  * A work item could be executed concurrently on multiple CPUs, which is rather unintuitive and made flush_work() behavior confusing and half-broken under certain circumstances. This problem doesn't exist for non-reentrant workqueues. While non-reentrancy check isn't free, the overhead is incurred only when a work item bounces across different CPUs and even in simulated pathological scenario the overhead isn't too high.

    All workqueues are made non-reentrant. This removes the distinction between flush_[delayed_]work() and flush_[delayed_]work_sync(). The former is now as strong as the latter and the specified work item is guaranteed to have finished execution of any previous queueing on return.

  * In addition to the various bug fixes, Lai redid and simplified CPU hotplug handling significantly.

  * Joonsoo introduced system_highpri_wq and used it during CPU hotplug.

  There are two merge commits - one to pull in IRQ safe timer from tip/timers/core and the other to pull in CPU hotplug fixes from wq/for-3.6-fixes as Lai's hotplug restructuring depended on them."

Fixed a number of trivial conflicts, but the more interesting conflicts were silent ones where the deprecated interfaces had been used by new code in the merge window, and thus didn't cause any real data conflicts. Tejun pointed out a few of them, I fixed a couple more.

* 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (46 commits)
  workqueue: remove spurious WARN_ON_ONCE(in_irq()) from try_to_grab_pending()
  workqueue: use cwq_set_max_active() helper for workqueue_set_max_active()
  workqueue: introduce cwq_set_max_active() helper for thaw_workqueues()
  workqueue: remove @delayed from cwq_dec_nr_in_flight()
  workqueue: fix possible stall on try_to_grab_pending() of a delayed work item
  workqueue: use hotcpu_notifier() for workqueue_cpu_down_callback()
  workqueue: use __cpuinit instead of __devinit for cpu callbacks
  workqueue: rename manager_mutex to assoc_mutex
  workqueue: WORKER_REBIND is no longer necessary for idle rebinding
  workqueue: WORKER_REBIND is no longer necessary for busy rebinding
  workqueue: reimplement idle worker rebinding
  workqueue: deprecate __cancel_delayed_work()
  workqueue: reimplement cancel_delayed_work() using try_to_grab_pending()
  workqueue: use mod_delayed_work() instead of __cancel + queue
  workqueue: use irqsafe timer for delayed_work
  workqueue: clean up delayed_work initializers and add missing one
  workqueue: make deferrable delayed_work initializer names consistent
  workqueue: cosmetic whitespace updates for macro definitions
  workqueue: deprecate system_nrt[_freezable]_wq
  workqueue: deprecate flush[_delayed]_work_sync()
  ...
commit 033d9959ed
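The driver conversions in the diff below all follow the same mechanical pattern the commit message describes. As a minimal illustrative sketch (not taken from any file in this merge - the my_dev structure and my_timeout_fn handler are hypothetical), the old cancel+queue idiom and its mod_delayed_work() replacement look like this:

#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work timeout_work;	/* hypothetical per-device work item */
};

static void my_timeout_fn(struct work_struct *work)
{
	/* timeout handling would go here */
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->timeout_work, my_timeout_fn);
}

/* Old idiom: cancel whatever is pending, then queue again with the new
 * delay.  Racy unless every caller serializes, which is why many drivers
 * open-coded variations of it.
 */
static void my_reschedule_old(struct my_dev *dev, unsigned long delay)
{
	cancel_delayed_work(&dev->timeout_work);
	schedule_delayed_work(&dev->timeout_work, delay);
}

/* New idiom: mod_delayed_work() behaves like mod_timer() - it adjusts the
 * expiry of a pending work item, or queues it if it is not pending.
 */
static void my_reschedule_new(struct my_dev *dev, unsigned long delay)
{
	mod_delayed_work(system_wq, &dev->timeout_work, delay);
}

Because all workqueues are now non-reentrant, flush_work() and flush_delayed_work() give the same guarantee the *_sync() variants used to, which is why most hunks below simply drop the _sync suffix.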
@@ -579,8 +579,8 @@ static int sharpsl_ac_check(void)
 static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	sharpsl_pm.flags |= SHARPSL_SUSPENDED;
-	flush_delayed_work_sync(&toggle_charger);
-	flush_delayed_work_sync(&sharpsl_bat);
+	flush_delayed_work(&toggle_charger);
+	flush_delayed_work(&sharpsl_bat);

 	if (sharpsl_pm.charge_mode == CHRG_ON)
 		sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;

@@ -310,7 +310,7 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
 	omap_mbox_disable_irq(mbox, IRQ_RX);
 	free_irq(mbox->irq, mbox);
 	tasklet_kill(&mbox->txq->tasklet);
-	flush_work_sync(&mbox->rxq->work);
+	flush_work(&mbox->rxq->work);
 	mbox_queue_free(mbox->txq);
 	mbox_queue_free(mbox->rxq);
 }

@@ -76,7 +76,7 @@ static void spu_gov_work(struct work_struct *work)
 static void spu_gov_init_work(struct spu_gov_info_struct *info)
 {
 	int delay = usecs_to_jiffies(info->poll_int);
-	INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
+	INIT_DEFERRABLE_WORK(&info->work, spu_gov_work);
 	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
 }

@@ -107,7 +107,7 @@ static int switch_drv_remove(struct platform_device *pdev)
 	device_remove_file(&pdev->dev, &dev_attr_switch);

 	platform_set_drvdata(pdev, NULL);
-	flush_work_sync(&psw->work);
+	flush_work(&psw->work);
 	del_timer_sync(&psw->debounce);
 	free_irq(irq, pdev);

@@ -262,7 +262,7 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-	__cancel_delayed_work(&q->delay_work);
+	cancel_delayed_work(&q->delay_work);
 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);

@@ -319,10 +319,8 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-	if (likely(!blk_queue_stopped(q))) {
-		__cancel_delayed_work(&q->delay_work);
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-	}
+	if (likely(!blk_queue_stopped(q)))
+		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);

@@ -180,7 +180,7 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)

 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * system_wq once there are some groups on the alloc_list waiting for
  * allocation.
  */
 static void tg_stats_alloc_fn(struct work_struct *work)

@@ -194,8 +194,7 @@ alloc_stats:
 	stats_cpu = alloc_percpu(struct tg_stats_cpu);
 	if (!stats_cpu) {
 		/* allocation failed, try again after some time */
-		queue_delayed_work(system_nrt_wq, dwork,
-				   msecs_to_jiffies(10));
+		schedule_delayed_work(dwork, msecs_to_jiffies(10));
 		return;
 	}
 }

@@ -238,7 +237,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	 */
 	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
-	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+	schedule_delayed_work(&tg_stats_alloc_work, 0);
 	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }

@@ -930,12 +929,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)

 	/* schedule work if limits changed even if no bio is queued */
 	if (total_nr_queued(td) || td->limits_changed) {
-		/*
-		 * We might have a work scheduled to be executed in future.
-		 * Cancel that and schedule a new one.
-		 */
-		__cancel_delayed_work(dwork);
-		queue_delayed_work(kthrotld_workqueue, dwork, delay);
+		mod_delayed_work(kthrotld_workqueue, dwork, delay);
 		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 				delay, jiffies);
 	}

@@ -1490,9 +1490,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
 	intv = disk_events_poll_jiffies(disk);
 	set_timer_slack(&ev->dwork.timer, intv / 4);
 	if (check_now)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	else if (intv)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 out_unlock:
 	spin_unlock_irqrestore(&ev->lock, flags);
 }

@@ -1534,10 +1534,8 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)

 	spin_lock_irq(&ev->lock);
 	ev->clearing |= mask;
-	if (!ev->block) {
-		cancel_delayed_work(&ev->dwork);
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
-	}
+	if (!ev->block)
+		mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	spin_unlock_irq(&ev->lock);
 }

@@ -1573,7 +1571,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)

 	/* uncondtionally schedule event check and wait for it to finish */
 	disk_block_events(disk);
-	queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+	queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	flush_delayed_work(&ev->dwork);
 	__disk_unblock_events(disk, false);

@@ -1610,7 +1608,7 @@ static void disk_events_workfn(struct work_struct *work)

 	intv = disk_events_poll_jiffies(disk);
 	if (!ev->block && intv)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, intv);

 	spin_unlock_irq(&ev->lock);

@@ -672,7 +672,6 @@ static void __reschedule_timeout(int drive, const char *message)

 	if (drive == current_reqD)
 		drive = current_drive;
-	__cancel_delayed_work(&fd_timeout);

 	if (drive < 0 || drive >= N_DRIVE) {
 		delay = 20UL * HZ;

@@ -680,7 +679,7 @@ static void __reschedule_timeout(int drive, const char *message)
 	} else
 		delay = UDP->timeout;

-	queue_delayed_work(floppy_wq, &fd_timeout, delay);
+	mod_delayed_work(floppy_wq, &fd_timeout, delay);
 	if (UDP->flags & FD_DEBUG)
 		DPRINT("reschedule timeout %s\n", message);
 	timeout_message = message;

@@ -891,7 +890,7 @@ static void unlock_fdc(void)

 	raw_cmd = NULL;
 	command_status = FD_COMMAND_NONE;
-	__cancel_delayed_work(&fd_timeout);
+	cancel_delayed_work(&fd_timeout);
 	do_floppy = NULL;
 	cont = NULL;
 	clear_bit(0, &fdc_busy);

@@ -670,7 +670,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
 	spin_unlock_irqrestore(&info->io_lock, flags);

 	/* Flush gnttab callback work. Must be done with no locks held. */
-	flush_work_sync(&info->work);
+	flush_work(&info->work);

 	del_gendisk(info->gd);

@@ -719,7 +719,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 	spin_unlock_irq(&info->io_lock);

 	/* Flush gnttab callback work. Must be done with no locks held. */
-	flush_work_sync(&info->work);
+	flush_work(&info->work);

 	/* Free resources associated with old device channel. */
 	if (info->ring_ref != GRANT_INVALID_REF) {

@@ -840,7 +840,7 @@ probe_fail_no_mem:

 static int __devexit remove_gdrom(struct platform_device *devptr)
 {
-	flush_work_sync(&work);
+	flush_work(&work);
 	blk_cleanup_queue(gd.gdrom_rq);
 	free_irq(HW_EVENT_GDROM_CMD, &gd);
 	free_irq(HW_EVENT_GDROM_DMA, &gd);

@@ -1433,7 +1433,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
 	sonypi_disable();

 	synchronize_irq(sonypi_device.irq);
-	flush_work_sync(&sonypi_device.input_work);
+	flush_work(&sonypi_device.input_work);

 	if (useinput) {
 		input_unregister_device(sonypi_device.input_key_dev);

@@ -1172,7 +1172,7 @@ int tpm_release(struct inode *inode, struct file *file)
 	struct tpm_chip *chip = file->private_data;

 	del_singleshot_timer_sync(&chip->user_read_timer);
-	flush_work_sync(&chip->work);
+	flush_work(&chip->work);
 	file->private_data = NULL;
 	atomic_set(&chip->data_pending, 0);
 	kfree(chip->data_buffer);

@@ -1225,7 +1225,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
 	int rc;

 	del_singleshot_timer_sync(&chip->user_read_timer);
-	flush_work_sync(&chip->work);
+	flush_work(&chip->work);
 	ret_size = atomic_read(&chip->data_pending);
 	atomic_set(&chip->data_pending, 0);
 	if (ret_size > 0) {	/* relay data */

@@ -466,7 +466,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	delay -= jiffies % delay;

 	dbs_info->enable = 1;
-	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+	INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
 	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }

@@ -644,7 +644,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	delay -= jiffies % delay;

 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
-	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+	INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
 	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }

@@ -607,7 +607,7 @@ static int __init devfreq_start_polling(void)
 	mutex_lock(&devfreq_list_lock);
 	polling = false;
 	devfreq_wq = create_freezable_workqueue("devfreq_wq");
-	INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
+	INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor);
 	mutex_unlock(&devfreq_list_lock);

 	devfreq_monitor(&devfreq_work.work);

@@ -559,7 +559,7 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
 		return;

 	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
-	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+	mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
 }

 /*

@@ -599,21 +599,6 @@ void edac_mc_reset_delay_period(int value)

 	mutex_lock(&mem_ctls_mutex);

-	/* scan the list and turn off all workq timers, doing so under lock
-	 */
-	list_for_each(item, &mc_devices) {
-		mci = list_entry(item, struct mem_ctl_info, link);
-
-		if (mci->op_state == OP_RUNNING_POLL)
-			cancel_delayed_work(&mci->work);
-	}
-
-	mutex_unlock(&mem_ctls_mutex);
-
-
-	/* re-walk the list, and reset the poll delay */
-	mutex_lock(&mem_ctls_mutex);
-
 	list_for_each(item, &mc_devices) {
 		mci = list_entry(item, struct mem_ctl_info, link);

@@ -143,7 +143,7 @@ static int __devinit adc_jack_probe(struct platform_device *pdev)

 	data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms);

-	INIT_DELAYED_WORK_DEFERRABLE(&data->handler, adc_jack_handler);
+	INIT_DEFERRABLE_WORK(&data->handler, adc_jack_handler);

 	platform_set_drvdata(pdev, data);

@@ -968,7 +968,7 @@ static void output_poll_execute(struct work_struct *work)
 	}

 	if (repoll)
-		queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
+		schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
 }

 void drm_kms_helper_poll_disable(struct drm_device *dev)

@@ -993,7 +993,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
 	}

 	if (poll)
-		queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);

@@ -1020,6 +1020,6 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 	/* kill timer and schedule immediate execution, this doesn't block */
 	cancel_delayed_work(&dev->mode_config.output_poll_work);
 	if (drm_kms_helper_poll)
-		queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+		schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);

@@ -878,7 +878,7 @@ static int g2d_suspend(struct device *dev)
 		/* FIXME: good range? */
 		usleep_range(500, 1000);

-	flush_work_sync(&g2d->runqueue_work);
+	flush_work(&g2d->runqueue_work);

 	return 0;
 }

@@ -302,7 +302,7 @@ nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
 		spin_unlock_irqrestore(&pgpio->lock, flags);

 		list_for_each_entry_safe(isr, tmp, &tofree, head) {
-			flush_work_sync(&isr->work);
+			flush_work(&isr->work);
 			kfree(isr);
 		}
 	}

@@ -277,7 +277,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
 		if (rdev->msi_enabled)
 			pci_disable_msi(rdev->pdev);
 	}
-	flush_work_sync(&rdev->hotplug_work);
+	flush_work(&rdev->hotplug_work);
 }

 /**

@@ -594,7 +594,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
 	par->dirty.active = false;
 	spin_unlock_irqrestore(&par->dirty.lock, flags);

-	flush_delayed_work_sync(&info->deferred_work);
+	flush_delayed_work(&info->deferred_work);

 	par->bo_ptr = NULL;
 	ttm_bo_kunmap(&par->map);

@@ -608,7 +608,7 @@ void picolcd_exit_framebuffer(struct picolcd_data *data)
 	/* make sure there is no running update - thus that fbdata->picolcd
 	 * once obtained under lock is guaranteed not to get free() under
 	 * the feet of the deferred work */
-	flush_delayed_work_sync(&info->deferred_work);
+	flush_delayed_work(&info->deferred_work);

 	data->fb_info = NULL;
 	unregister_framebuffer(info);

@@ -229,7 +229,7 @@ static void wiiext_worker(struct work_struct *work)
 /* schedule work only once, otherwise mark for reschedule */
 static void wiiext_schedule(struct wiimote_ext *ext)
 {
-	queue_work(system_nrt_wq, &ext->worker);
+	schedule_work(&ext->worker);
 }

 /*

@@ -152,13 +152,11 @@ static void set_timeout(unsigned long time)
 {
 	unsigned long delay;

-	cancel_delayed_work(&work);
-
 	delay = time - jiffies;
 	if ((long)delay <= 0)
 		delay = 1;

-	queue_delayed_work(addr_wq, &work, delay);
+	mod_delayed_work(addr_wq, &work, delay);
 }

 static void queue_req(struct addr_req *req)

@@ -2004,7 +2004,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
 	unsigned long delay;

 	if (list_empty(&mad_agent_priv->wait_list)) {
-		__cancel_delayed_work(&mad_agent_priv->timed_work);
+		cancel_delayed_work(&mad_agent_priv->timed_work);
 	} else {
 		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
 					 struct ib_mad_send_wr_private,

@@ -2013,13 +2013,11 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
 		if (time_after(mad_agent_priv->timeout,
 			       mad_send_wr->timeout)) {
 			mad_agent_priv->timeout = mad_send_wr->timeout;
-			__cancel_delayed_work(&mad_agent_priv->timed_work);
 			delay = mad_send_wr->timeout - jiffies;
 			if ((long)delay <= 0)
 				delay = 1;
-			queue_delayed_work(mad_agent_priv->qp_info->
-					   port_priv->wq,
-					   &mad_agent_priv->timed_work, delay);
+			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+					 &mad_agent_priv->timed_work, delay);
 		}
 	}
 }

@@ -2052,11 +2050,9 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
 	list_add(&mad_send_wr->agent_list, list_item);

 	/* Reschedule a work item if we have a shorter timeout */
-	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-		__cancel_delayed_work(&mad_agent_priv->timed_work);
-		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
-				   &mad_agent_priv->timed_work, delay);
-	}
+	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
+		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+				 &mad_agent_priv->timed_work, delay);
 }

 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,

@@ -2679,11 +2679,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 				}
 			}
 			if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) {
-				if (nesdev->link_recheck)
-					cancel_delayed_work(&nesdev->work);
 				nesdev->link_recheck = 1;
-				schedule_delayed_work(&nesdev->work,
-						      NES_LINK_RECHECK_DELAY);
+				mod_delayed_work(system_wq, &nesdev->work,
+						 NES_LINK_RECHECK_DELAY);
 			}
 		}

@@ -243,10 +243,9 @@ static int nes_netdev_open(struct net_device *netdev)

 	spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
 	if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
-		if (nesdev->link_recheck)
-			cancel_delayed_work(&nesdev->work);
 		nesdev->link_recheck = 1;
-		schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+		mod_delayed_work(system_wq, &nesdev->work,
+				 NES_LINK_RECHECK_DELAY);
 	}
 	spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);

@@ -156,8 +156,7 @@ static irqreturn_t qt2160_irq(int irq, void *_qt2160)

 	spin_lock_irqsave(&qt2160->lock, flags);

-	__cancel_delayed_work(&qt2160->dwork);
-	schedule_delayed_work(&qt2160->dwork, 0);
+	mod_delayed_work(system_wq, &qt2160->dwork, 0);

 	spin_unlock_irqrestore(&qt2160->lock, flags);

@@ -376,12 +376,7 @@ static void synaptics_i2c_reschedule_work(struct synaptics_i2c *touch,

 	spin_lock_irqsave(&touch->lock, flags);

-	/*
-	 * If work is already scheduled then subsequent schedules will not
-	 * change the scheduled time that's why we have to cancel it first.
-	 */
-	__cancel_delayed_work(&touch->dwork);
-	schedule_delayed_work(&touch->dwork, delay);
+	mod_delayed_work(system_wq, &touch->dwork, delay);

 	spin_unlock_irqrestore(&touch->lock, flags);
 }

@@ -221,7 +221,7 @@ static void wm831x_ts_input_close(struct input_dev *idev)
 	synchronize_irq(wm831x_ts->pd_irq);

 	/* Make sure the IRQ completion work is quiesced */
-	flush_work_sync(&wm831x_ts->pd_data_work);
+	flush_work(&wm831x_ts->pd_data_work);

 	/* If we ended up with the pen down then make sure we revert back
 	 * to pen detection state for the next time we start up.

@@ -116,7 +116,7 @@ mISDN_freedchannel(struct dchannel *ch)
 	}
 	skb_queue_purge(&ch->squeue);
 	skb_queue_purge(&ch->rqueue);
-	flush_work_sync(&ch->workq);
+	flush_work(&ch->workq);
 	return 0;
 }
 EXPORT_SYMBOL(mISDN_freedchannel);

@@ -737,7 +737,7 @@ err_sysfs_remove:
 	sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
 err_unregister:
 	led_classdev_unregister(&led->cdev);
-	flush_work_sync(&led->work);
+	flush_work(&led->work);

 	return ret;
 }

@@ -751,7 +751,7 @@ static int __devexit lm3533_led_remove(struct platform_device *pdev)
 	lm3533_ctrlbank_disable(&led->cb);
 	sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
 	led_classdev_unregister(&led->cdev);
-	flush_work_sync(&led->work);
+	flush_work(&led->work);

 	return 0;
 }

@@ -765,7 +765,7 @@ static void lm3533_led_shutdown(struct platform_device *pdev)

 	lm3533_ctrlbank_disable(&led->cb);
 	lm3533_led_set(&led->cdev, LED_OFF);	/* disable blink */
-	flush_work_sync(&led->work);
+	flush_work(&led->work);
 }

 static struct platform_driver lm3533_led_driver = {

@@ -172,7 +172,7 @@ static int __devexit lp8788_led_remove(struct platform_device *pdev)
 	struct lp8788_led *led = platform_get_drvdata(pdev);

 	led_classdev_unregister(&led->led_dev);
-	flush_work_sync(&led->work);
+	flush_work(&led->work);

 	return 0;
 }

@@ -275,7 +275,7 @@ static int wm8350_led_remove(struct platform_device *pdev)
 	struct wm8350_led *led = platform_get_drvdata(pdev);

 	led_classdev_unregister(&led->cdev);
-	flush_work_sync(&led->work);
+	flush_work(&led->work);
 	wm8350_led_disable(led);
 	regulator_put(led->dcdc);
 	regulator_put(led->isink);

@@ -226,7 +226,7 @@ void ams_sensor_detach(void)
 	 * We do this after ams_info.exit(), because an interrupt might
 	 * have arrived before disabling them.
 	 */
-	flush_work_sync(&ams_info.worker);
+	flush_work(&ams_info.worker);

 	/* Remove device */
 	of_device_unregister(ams_info.of_dev);

@@ -944,7 +944,7 @@ static void flush_multipath_work(struct multipath *m)
 	flush_workqueue(kmpath_handlerd);
 	multipath_wait_for_pg_init_completion(m);
 	flush_workqueue(kmultipathd);
-	flush_work_sync(&m->trigger_event);
+	flush_work(&m->trigger_event);
 }

 static void multipath_dtr(struct dm_target *ti)

@@ -1146,7 +1146,7 @@ static void mirror_dtr(struct dm_target *ti)

 	del_timer_sync(&ms->timer);
 	flush_workqueue(ms->kmirrord_wq);
-	flush_work_sync(&ms->trigger_event);
+	flush_work(&ms->trigger_event);
 	dm_kcopyd_client_destroy(ms->kcopyd_client);
 	destroy_workqueue(ms->kmirrord_wq);
 	free_context(ms, ti, ms->nr_mirrors);

@@ -199,7 +199,7 @@ static void stripe_dtr(struct dm_target *ti)
 	for (i = 0; i < sc->stripes; i++)
 		dm_put_device(ti, sc->stripe[i].dev);

-	flush_work_sync(&sc->trigger_event);
+	flush_work(&sc->trigger_event);
 	kfree(sc);
 }

@@ -1329,8 +1329,8 @@ static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
 		return -EBUSY;

 	dvb_net_stop(net);
-	flush_work_sync(&priv->set_multicast_list_wq);
-	flush_work_sync(&priv->restart_net_feed_wq);
+	flush_work(&priv->set_multicast_list_wq);
+	flush_work(&priv->restart_net_feed_wq);
 	printk("dvb_net: removed network interface %s\n", net->name);
 	unregister_netdev(net);
 	dvbnet->state[num]=0;

@@ -111,7 +111,7 @@ void mantis_evmgr_exit(struct mantis_ca *ca)
 	struct mantis_pci *mantis = ca->ca_priv;

 	dprintk(MANTIS_DEBUG, 1, "Mantis Host I/F Event manager exiting");
-	flush_work_sync(&ca->hif_evm_work);
+	flush_work(&ca->hif_evm_work);
 	mantis_hif_exit(ca);
 	mantis_pcmcia_exit(ca);
 }

@@ -183,6 +183,6 @@ void mantis_uart_exit(struct mantis_pci *mantis)
 {
 	/* disable interrupt */
 	mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
-	flush_work_sync(&mantis->uart_work);
+	flush_work(&mantis->uart_work);
 }
 EXPORT_SYMBOL_GPL(mantis_uart_exit);

@@ -196,7 +196,7 @@ static void request_modules(struct bttv *dev)

 static void flush_request_modules(struct bttv *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)

@@ -272,7 +272,7 @@ static void request_modules(struct cx18 *dev)

 static void flush_request_modules(struct cx18 *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)

@@ -1002,7 +1002,7 @@ static void request_modules(struct cx231xx *dev)

 static void flush_request_modules(struct cx231xx *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)

@@ -231,9 +231,9 @@ static void cx23885_input_ir_stop(struct cx23885_dev *dev)
 		v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
 		v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
 	}
-	flush_work_sync(&dev->cx25840_work);
-	flush_work_sync(&dev->ir_rx_work);
-	flush_work_sync(&dev->ir_tx_work);
+	flush_work(&dev->cx25840_work);
+	flush_work(&dev->ir_rx_work);
+	flush_work(&dev->ir_tx_work);
 }

 static void cx23885_input_ir_close(struct rc_dev *rc)

@@ -70,7 +70,7 @@ static void request_modules(struct cx8802_dev *dev)

 static void flush_request_modules(struct cx8802_dev *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)

@@ -2900,7 +2900,7 @@ static void request_modules(struct em28xx *dev)

 static void flush_request_modules(struct em28xx *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)

@@ -1198,7 +1198,7 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)

 	atomic_inc(&cam->reset_disable);

-	flush_work_sync(&cam->sensor_reset_work);
+	flush_work(&cam->sensor_reset_work);

 	rval = videobuf_streamoff(q);
 	if (!rval) {

@@ -1512,7 +1512,7 @@ static int omap24xxcam_release(struct file *file)

 	atomic_inc(&cam->reset_disable);

-	flush_work_sync(&cam->sensor_reset_work);
+	flush_work(&cam->sensor_reset_work);

 	/* stop streaming capture */
 	videobuf_streamoff(&fh->vbq);

@@ -1536,7 +1536,7 @@ static int omap24xxcam_release(struct file *file)
 	 * not be scheduled anymore since streaming is already
 	 * disabled.)
 	 */
-	flush_work_sync(&cam->sensor_reset_work);
+	flush_work(&cam->sensor_reset_work);

 	mutex_lock(&cam->mutex);
 	if (atomic_dec_return(&cam->users) == 0) {

@@ -170,7 +170,7 @@ static void request_submodules(struct saa7134_dev *dev)

 static void flush_request_submodules(struct saa7134_dev *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }

 #else

@@ -556,7 +556,7 @@ static int empress_fini(struct saa7134_dev *dev)

 	if (NULL == dev->empress_dev)
 		return 0;
-	flush_work_sync(&dev->empress_workqueue);
+	flush_work(&dev->empress_workqueue);
 	video_unregister_device(dev->empress_dev);
 	dev->empress_dev = NULL;
 	return 0;

@@ -1074,7 +1074,7 @@ static void request_modules(struct tm6000_core *dev)

 static void flush_request_modules(struct tm6000_core *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)

@@ -1259,7 +1259,7 @@ static int menelaus_probe(struct i2c_client *client,
 	return 0;
 fail2:
 	free_irq(client->irq, menelaus);
-	flush_work_sync(&menelaus->work);
+	flush_work(&menelaus->work);
 fail1:
 	kfree(menelaus);
 	return err;

@@ -1270,7 +1270,7 @@ static int __exit menelaus_remove(struct i2c_client *client)
 	struct menelaus_chip *menelaus = i2c_get_clientdata(client);

 	free_irq(client->irq, menelaus);
-	flush_work_sync(&menelaus->work);
+	flush_work(&menelaus->work);
 	kfree(menelaus);
 	the_menelaus = NULL;
 	return 0;

@@ -487,7 +487,7 @@ static void __exit
 ioc4_exit(void)
 {
 	/* Ensure ioc4_load_modules() has completed before exiting */
-	flush_work_sync(&ioc4_load_modules_work);
+	flush_work(&ioc4_load_modules_work);
 	pci_unregister_driver(&ioc4_driver);
 }

@@ -204,8 +204,8 @@ void mmc_host_clk_release(struct mmc_host *host)
 	host->clk_requests--;
 	if (mmc_host_may_gate_card(host->card) &&
 		!host->clk_requests)
-		queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
-				   msecs_to_jiffies(host->clkgate_delay));
+		schedule_delayed_work(&host->clk_gate_work,
+				      msecs_to_jiffies(host->clkgate_delay));
 	spin_unlock_irqrestore(&host->clk_lock, flags);
 }

@@ -387,8 +387,8 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
 		printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");

 	cxt->mtd = NULL;
-	flush_work_sync(&cxt->work_erase);
-	flush_work_sync(&cxt->work_write);
+	flush_work(&cxt->work_erase);
+	flush_work(&cxt->work_write);
 }


@@ -1394,7 +1394,7 @@ static int offload_close(struct t3cdev *tdev)
 	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

 	/* Flush work scheduled while releasing TIDs */
-	flush_work_sync(&td->tid_release_task);
+	flush_work(&td->tid_release_task);

 	tdev->lldev = NULL;
 	cxgb3_set_dummy_ops(tdev);

@@ -139,5 +139,5 @@ void mlx4_sense_init(struct mlx4_dev *dev)
 	for (port = 1; port <= dev->caps.num_ports; port++)
 		sense->do_sense_port[port] = 1;

-	INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
+	INIT_DEFERRABLE_WORK(&sense->sense_poll, mlx4_sense_port);
 }

@@ -3521,7 +3521,7 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)

 	strncpy(buf, dev->name, IFNAMSIZ);

-	flush_work_sync(&vdev->reset_task);
+	flush_work(&vdev->reset_task);

 	/* in 2.6 will call stop() if device is up */
 	unregister_netdev(dev);

@@ -3890,7 +3890,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
 	schedule_work(&cp->reset_task);
 #endif

-	flush_work_sync(&cp->reset_task);
+	flush_work(&cp->reset_task);
 	return 0;
 }

@@ -9927,7 +9927,7 @@ static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
 	if (!netif_running(dev))
 		return 0;

-	flush_work_sync(&np->reset_task);
+	flush_work(&np->reset_task);
 	niu_netif_stop(np);

 	del_timer_sync(&np->timer);

@@ -521,7 +521,7 @@ static void refill_work(struct work_struct *work)
 	/* In theory, this can happen: if we don't get any buffers in
 	 * we will *never* try to fill again. */
 	if (still_empty)
-		queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
+		schedule_delayed_work(&vi->refill, HZ/2);
 }

 static int virtnet_poll(struct napi_struct *napi, int budget)

@@ -540,7 +540,7 @@ again:

 	if (vi->num < vi->max / 2) {
 		if (!try_fill_recv(vi, GFP_ATOMIC))
-			queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+			schedule_delayed_work(&vi->refill, 0);
 	}

 	/* Out of packets? */

@@ -745,7 +745,7 @@ static int virtnet_open(struct net_device *dev)

 	/* Make sure we have some buffers: if oom use wq. */
 	if (!try_fill_recv(vi, GFP_KERNEL))
-		queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+		schedule_delayed_work(&vi->refill, 0);

 	virtnet_napi_enable(vi);
 	return 0;

@@ -1020,7 +1020,7 @@ static void virtnet_config_changed(struct virtio_device *vdev)
 {
 	struct virtnet_info *vi = vdev->priv;

-	queue_work(system_nrt_wq, &vi->config_work);
+	schedule_work(&vi->config_work);
 }

 static int init_vqs(struct virtnet_info *vi)

@@ -1152,7 +1152,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	   otherwise get link status from config. */
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
 		netif_carrier_off(dev);
-		queue_work(system_nrt_wq, &vi->config_work);
+		schedule_work(&vi->config_work);
 	} else {
 		vi->status = VIRTIO_NET_S_LINK_UP;
 		netif_carrier_on(dev);

@@ -1264,7 +1264,7 @@ static int virtnet_restore(struct virtio_device *vdev)
 	netif_device_attach(vi->dev);

 	if (!try_fill_recv(vi, GFP_KERNEL))
-		queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+		schedule_delayed_work(&vi->refill, 0);

 	mutex_lock(&vi->config_lock);
 	vi->config_enable = true;

@@ -860,10 +860,10 @@ void hostap_free_data(struct ap_data *ap)
 		return;
 	}

-	flush_work_sync(&ap->add_sta_proc_queue);
+	flush_work(&ap->add_sta_proc_queue);

 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-	flush_work_sync(&ap->wds_oper_queue);
+	flush_work(&ap->wds_oper_queue);
 	if (ap->crypt)
 		ap->crypt->deinit(ap->crypt_priv);
 	ap->crypt = ap->crypt_priv = NULL;

@@ -3311,13 +3311,13 @@ static void prism2_free_local_data(struct net_device *dev)

 	unregister_netdev(local->dev);

-	flush_work_sync(&local->reset_queue);
-	flush_work_sync(&local->set_multicast_list_queue);
-	flush_work_sync(&local->set_tim_queue);
+	flush_work(&local->reset_queue);
+	flush_work(&local->set_multicast_list_queue);
+	flush_work(&local->set_tim_queue);
 #ifndef PRISM2_NO_STATION_MODES
-	flush_work_sync(&local->info_queue);
+	flush_work(&local->info_queue);
 #endif
-	flush_work_sync(&local->comms_qual_update);
+	flush_work(&local->comms_qual_update);

 	lib80211_crypt_info_free(&local->crypt_info);

@@ -2181,8 +2181,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)

 	/* Make sure the RF Kill check timer is running */
 	priv->stop_rf_kill = 0;
-	cancel_delayed_work(&priv->rf_kill);
-	schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
+	mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ));
 }

 static void send_scan_event(void *data)

@@ -4322,9 +4321,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
 			  "disabled by HW switch\n");
 		/* Make sure the RF_KILL check timer is running */
 		priv->stop_rf_kill = 0;
-		cancel_delayed_work(&priv->rf_kill);
-		schedule_delayed_work(&priv->rf_kill,
-				      round_jiffies_relative(HZ));
+		mod_delayed_work(system_wq, &priv->rf_kill,
+				 round_jiffies_relative(HZ));
 	} else
 		schedule_reset(priv);
 }

@@ -1164,8 +1164,7 @@ void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
 {
 	struct zd_usb_rx *rx = &usb->rx;

-	cancel_delayed_work(&rx->idle_work);
-	queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
+	mod_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
 }

 static inline void init_usb_interrupt(struct zd_usb *usb)

@@ -7685,25 +7685,15 @@ static int fan_set_speed(int speed)

 static void fan_watchdog_reset(void)
 {
-	static int fan_watchdog_active;
-
 	if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
 		return;

-	if (fan_watchdog_active)
-		cancel_delayed_work(&fan_watchdog_task);
-
 	if (fan_watchdog_maxinterval > 0 &&
-	    tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
-		fan_watchdog_active = 1;
-		if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
-				msecs_to_jiffies(fan_watchdog_maxinterval
-						 * 1000))) {
-			pr_err("failed to queue the fan watchdog, "
-			       "watchdog will not trigger\n");
-		}
-	} else
-		fan_watchdog_active = 0;
+	    tpacpi_lifecycle != TPACPI_LIFE_EXITING)
+		mod_delayed_work(tpacpi_wq, &fan_watchdog_task,
+			msecs_to_jiffies(fan_watchdog_maxinterval * 1000));
+	else
+		cancel_delayed_work(&fan_watchdog_task);
 }

 static void fan_watchdog_fire(struct work_struct *ignored)

@@ -1018,7 +1018,7 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
 	}

 	/* Init work for measuring temperature periodically */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
+	INIT_DEFERRABLE_WORK(&di->btemp_periodic_work,
 		ab8500_btemp_periodic_work);

 	/* Identify the battery */

@@ -2618,9 +2618,9 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
 	}

 	/* Init work for HW failure check */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work,
+	INIT_DEFERRABLE_WORK(&di->check_hw_failure_work,
 		ab8500_charger_check_hw_failure_work);
-	INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work,
+	INIT_DEFERRABLE_WORK(&di->check_usbchgnotok_work,
 		ab8500_charger_check_usbchargernotok_work);

 	/*

@@ -2632,10 +2632,10 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
 	 * watchdog have to be kicked by the charger driver
 	 * when the AC charger is disabled
 	 */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->kick_wd_work,
+	INIT_DEFERRABLE_WORK(&di->kick_wd_work,
 		ab8500_charger_kick_watchdog_work);

-	INIT_DELAYED_WORK_DEFERRABLE(&di->check_vbat_work,
+	INIT_DEFERRABLE_WORK(&di->check_vbat_work,
 		ab8500_charger_check_vbat_work);

 	/* Init work for charger detection */

@@ -2516,19 +2516,19 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
 	INIT_WORK(&di->fg_acc_cur_work, ab8500_fg_acc_cur_work);

 	/* Init work for reinitialising the fg algorithm */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work,
+	INIT_DEFERRABLE_WORK(&di->fg_reinit_work,
 		ab8500_fg_reinit_work);

 	/* Work delayed Queue to run the state machine */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work,
+	INIT_DEFERRABLE_WORK(&di->fg_periodic_work,
 		ab8500_fg_periodic_work);

 	/* Work to check low battery condition */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work,
+	INIT_DEFERRABLE_WORK(&di->fg_low_bat_work,
 		ab8500_fg_low_bat_work);

 	/* Init work for HW failure check */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->fg_check_hw_failure_work,
+	INIT_DEFERRABLE_WORK(&di->fg_check_hw_failure_work,
 		ab8500_fg_check_hw_failure_work);

 	/* Initialize OVV, and other registers */

@@ -1848,9 +1848,9 @@ static int __devinit abx500_chargalg_probe(struct platform_device *pdev)
 	}

 	/* Init work for chargalg */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_periodic_work,
+	INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
 		abx500_chargalg_periodic_work);
-	INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_wd_work,
+	INIT_DEFERRABLE_WORK(&di->chargalg_wd_work,
 		abx500_chargalg_wd_work);

 	/* Init work for chargalg */

@@ -509,9 +509,8 @@ static void _setup_polling(struct work_struct *work)
 	if (!delayed_work_pending(&cm_monitor_work) ||
 	    (delayed_work_pending(&cm_monitor_work) &&
 	     time_after(next_polling, _next_polling))) {
-		cancel_delayed_work_sync(&cm_monitor_work);
 		next_polling = jiffies + polling_jiffy;
-		queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+		mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
 	}

 out:

@@ -546,10 +545,8 @@ static void fullbatt_handler(struct charger_manager *cm)
 	if (cm_suspended)
 		device_set_wakeup_capable(cm->dev, true);

-	if (delayed_work_pending(&cm->fullbatt_vchk_work))
-		cancel_delayed_work(&cm->fullbatt_vchk_work);
-	queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
-			   msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
+	mod_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+			 msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
 	cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
 				       desc->fullbatt_vchkdrop_ms);

@@ -290,7 +290,7 @@ static struct gpio collie_batt_gpios[] = {
 static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state)
 {
 	/* flush all pending status updates */
-	flush_work_sync(&bat_work);
+	flush_work(&bat_work);
 	return 0;
 }

@@ -355,8 +355,7 @@ static void ds2760_battery_external_power_changed(struct power_supply *psy)

 	dev_dbg(di->dev, "%s\n", __func__);

-	cancel_delayed_work(&di->monitor_work);
-	queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
+	mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
 }


@@ -401,8 +400,7 @@ static void ds2760_battery_set_charged(struct power_supply *psy)

 	/* postpone the actual work by 20 secs. This is for debouncing GPIO
 	 * signals and to let the current value settle. See AN4188. */
-	cancel_delayed_work(&di->set_charged_work);
-	queue_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
+	mod_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
 }

 static int ds2760_battery_get_property(struct power_supply *psy,

@@ -616,8 +614,7 @@ static int ds2760_battery_resume(struct platform_device *pdev)
 	di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN;
 	power_supply_changed(&di->bat);

-	cancel_delayed_work(&di->monitor_work);
-	queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);
+	mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);

 	return 0;
 }

@@ -173,16 +173,14 @@ static void jz_battery_external_power_changed(struct power_supply *psy)
 {
 	struct jz_battery *jz_battery = psy_to_jz_battery(psy);

-	cancel_delayed_work(&jz_battery->work);
-	schedule_delayed_work(&jz_battery->work, 0);
+	mod_delayed_work(system_wq, &jz_battery->work, 0);
 }

 static irqreturn_t jz_battery_charge_irq(int irq, void *data)
 {
 	struct jz_battery *jz_battery = data;

-	cancel_delayed_work(&jz_battery->work);
-	schedule_delayed_work(&jz_battery->work, 0);
+	mod_delayed_work(system_wq, &jz_battery->work, 0);

 	return IRQ_HANDLED;
 }

@@ -232,7 +232,7 @@ static int __devinit max17040_probe(struct i2c_client *client,
 	max17040_reset(client);
 	max17040_get_version(client);

-	INIT_DELAYED_WORK_DEFERRABLE(&chip->work, max17040_work);
+	INIT_DEFERRABLE_WORK(&chip->work, max17040_work);
 	schedule_delayed_work(&chip->work, MAX17040_DELAY);

 	return 0;

@@ -327,7 +327,7 @@ static struct gpio tosa_bat_gpios[] = {
 static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
 {
 	/* flush all pending status updates */
-	flush_work_sync(&bat_work);
+	flush_work(&bat_work);
 	return 0;
 }

@@ -146,7 +146,7 @@ static irqreturn_t wm97xx_chrg_irq(int irq, void *data)
 #ifdef CONFIG_PM
 static int wm97xx_bat_suspend(struct device *dev)
 {
-	flush_work_sync(&bat_work);
+	flush_work(&bat_work);
 	return 0;
 }

@@ -276,7 +276,7 @@ static int z2_batt_suspend(struct device *dev)
 	struct i2c_client *client = to_i2c_client(dev);
 	struct z2_charger *charger = i2c_get_clientdata(client);

-	flush_work_sync(&charger->bat_work);
+	flush_work(&charger->bat_work);
 	return 0;
 }

@@ -3476,7 +3476,7 @@ void regulator_unregister(struct regulator_dev *rdev)
 	regulator_put(rdev->supply);
 	mutex_lock(&regulator_list_mutex);
 	debugfs_remove_recursive(rdev->debugfs);
-	flush_work_sync(&rdev->disable_work.work);
+	flush_work(&rdev->disable_work.work);
 	WARN_ON(rdev->open_count);
 	unset_regulator_supplies(rdev);
 	list_del(&rdev->list);

@@ -999,7 +999,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
 	int poll_count = 0;
 	arcmsr_free_sysfs_attr(acb);
 	scsi_remove_host(host);
-	flush_work_sync(&acb->arcmsr_do_message_isr_bh);
+	flush_work(&acb->arcmsr_do_message_isr_bh);
 	del_timer_sync(&acb->eternal_timer);
 	arcmsr_disable_outbound_ints(acb);
 	arcmsr_stop_adapter_bgrb(acb);

@@ -1045,7 +1045,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
 		(struct AdapterControlBlock *)host->hostdata;
 	del_timer_sync(&acb->eternal_timer);
 	arcmsr_disable_outbound_ints(acb);
-	flush_work_sync(&acb->arcmsr_do_message_isr_bh);
+	flush_work(&acb->arcmsr_do_message_isr_bh);
 	arcmsr_stop_adapter_bgrb(acb);
 	arcmsr_flush_adapter_cache(acb);
 }

@@ -9020,7 +9020,7 @@ static void __ipr_remove(struct pci_dev *pdev)

 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-	flush_work_sync(&ioa_cfg->work_q);
+	flush_work(&ioa_cfg->work_q);
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

 	spin_lock(&ipr_driver_lock);

@@ -5459,7 +5459,7 @@ static void __devexit pmcraid_remove(struct pci_dev *pdev)
 	pmcraid_shutdown(pdev);

 	pmcraid_disable_interrupts(pinstance, ~0);
-	flush_work_sync(&pinstance->worker_q);
+	flush_work(&pinstance->worker_q);

 	pmcraid_kill_tasklets(pinstance);
 	pmcraid_unregister_interrupt_handler(pinstance);

@@ -969,7 +969,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	mutex_unlock(&ha->tgt.tgt_mutex);

-	flush_delayed_work_sync(&tgt->sess_del_work);
+	flush_delayed_work(&tgt->sess_del_work);

 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
 	    "Waiting for sess works (tgt %p)", tgt);

@@ -827,7 +827,7 @@ void gether_cleanup(void)
 		return;

 	unregister_netdev(the_dev->net);
-	flush_work_sync(&the_dev->work);
+	flush_work(&the_dev->work);
 	free_netdev(the_dev->net);

 	the_dev = NULL;

@@ -264,7 +264,7 @@ int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
 	list_add_tail(&msg->node, &nvec->tx_data);
 	spin_unlock_irqrestore(&nvec->tx_lock, flags);

-	queue_work(system_nrt_wq, &nvec->tx_work);
+	schedule_work(&nvec->tx_work);

 	return 0;
 }

@@ -471,7 +471,7 @@ static void nvec_rx_completed(struct nvec_chip *nvec)
 	if (!nvec_msg_is_event(nvec->rx))
 		complete(&nvec->ec_transfer);

-	queue_work(system_nrt_wq, &nvec->rx_work);
+	schedule_work(&nvec->rx_work);
 }

 /**

@@ -694,17 +694,14 @@ thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
 static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
 					    int delay)
 {
-	cancel_delayed_work(&(tz->poll_queue));
-
-	if (!delay)
-		return;
-
 	if (delay > 1000)
-		queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
-				   round_jiffies(msecs_to_jiffies(delay)));
+		mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+				 round_jiffies(msecs_to_jiffies(delay)));
+	else if (delay)
+		mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+				 msecs_to_jiffies(delay));
 	else
-		queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
-				   msecs_to_jiffies(delay));
+		cancel_delayed_work(&tz->poll_queue);
 }

 static void thermal_zone_device_passive(struct thermal_zone_device *tz,

@@ -765,7 +765,7 @@ static void hvsi_flush_output(struct hvsi_struct *hp)

 	/* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
 	cancel_delayed_work_sync(&hp->writer);
-	flush_work_sync(&hp->handshaker);
+	flush_work(&hp->handshaker);

 	/*
 	 * it's also possible that our timeout expired and hvsi_write_worker

@@ -1729,7 +1729,7 @@ void ipwireless_hardware_free(struct ipw_hardware *hw)

 	ipwireless_stop_interrupts(hw);

-	flush_work_sync(&hw->work_rx);
+	flush_work(&hw->work_rx);

 	for (i = 0; i < NL_NUM_OF_ADDRESSES; i++)
 		if (hw->packet_assembler[i] != NULL)

@@ -435,8 +435,8 @@ void ipwireless_network_free(struct ipw_network *network)
 	network->shutting_down = 1;

 	ipwireless_ppp_close(network);
-	flush_work_sync(&network->work_go_online);
-	flush_work_sync(&network->work_go_offline);
+	flush_work(&network->work_go_online);
+	flush_work(&network->work_go_offline);

 	ipwireless_stop_interrupts(network->hardware);
 	ipwireless_associate_network(network->hardware, NULL);

@@ -122,7 +122,7 @@ static void kgdboc_unregister_kbd(void)
 			i--;
 		}
 	}
-	flush_work_sync(&kgdboc_restore_input_work);
+	flush_work(&kgdboc_restore_input_work);
 }
 #else /* ! CONFIG_KDB_KEYBOARD */
 #define kgdboc_register_kbd(x) 0

@@ -1227,7 +1227,7 @@ static int serial_omap_suspend(struct device *dev)
 	struct uart_omap_port *up = dev_get_drvdata(dev);

 	uart_suspend_port(&serial_omap_reg, &up->port);
-	flush_work_sync(&up->qos_work);
+	flush_work(&up->qos_work);

 	return 0;
 }

@@ -523,9 +523,9 @@ static int tty_ldisc_halt(struct tty_struct *tty)
  */
 static void tty_ldisc_flush_works(struct tty_struct *tty)
 {
-	flush_work_sync(&tty->hangup_work);
-	flush_work_sync(&tty->SAK_work);
-	flush_work_sync(&tty->buf.work);
+	flush_work(&tty->hangup_work);
+	flush_work(&tty->SAK_work);
+	flush_work(&tty->buf.work);
 }

 /**

@@ -718,7 +718,7 @@ static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_de
 	del_timer_sync(&instance->resubmit_timer);
 	usb_free_urb(int_urb);

-	flush_work_sync(&instance->status_check_work);
+	flush_work(&instance->status_check_work);
 }

 static int speedtch_pre_reset(struct usb_interface *intf)

@@ -2262,7 +2262,7 @@ static void uea_stop(struct uea_softc *sc)
 	usb_free_urb(sc->urb_int);

 	/* flush the work item, when no one can schedule it */
-	flush_work_sync(&sc->task);
+	flush_work(&sc->task);

 	release_firmware(sc->dsp_firm);
 	uea_leaves(INS_TO_USBDEV(sc));

@@ -834,7 +834,7 @@ void gether_cleanup(void)
 		return;

 	unregister_netdev(the_dev->net);
-	flush_work_sync(&the_dev->work);
+	flush_work(&the_dev->work);
 	free_netdev(the_dev->net);

 	the_dev = NULL;

@@ -893,7 +893,7 @@ static void ohci_stop (struct usb_hcd *hcd)
 	ohci_dump (ohci, 1);

 	if (quirk_nec(ohci))
-		flush_work_sync(&ohci->nec_work);
+		flush_work(&ohci->nec_work);

 	ohci_usb_reset (ohci);
 	ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);

@@ -1230,7 +1230,7 @@ static int __exit isp1301_remove(struct i2c_client *i2c)
 	isp->timer.data = 0;
 	set_bit(WORK_STOP, &isp->todo);
 	del_timer_sync(&isp->timer);
-	flush_work_sync(&isp->work);
+	flush_work(&isp->work);

 	put_device(&i2c->dev);
 	the_transceiver = NULL;

@@ -906,7 +906,7 @@ static int taal_probe(struct omap_dss_device *dssdev)
 		r = -ENOMEM;
 		goto err_wq;
 	}
-	INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work);
+	INIT_DEFERRABLE_WORK(&td->esd_work, taal_esd_work);
 	INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work);

 	dev_set_drvdata(&dssdev->dev, td);

@@ -962,8 +962,8 @@ static int taal_probe(struct omap_dss_device *dssdev)
 			goto err_irq;
 		}

-		INIT_DELAYED_WORK_DEFERRABLE(&td->te_timeout_work,
-					taal_te_timeout_work_callback);
+		INIT_DEFERRABLE_WORK(&td->te_timeout_work,
+					taal_te_timeout_work_callback);

 		dev_dbg(&dssdev->dev, "Using GPIO TE\n");
 	}

@@ -4306,7 +4306,7 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
 	 * and is sending the data.
 	 */

-	__cancel_delayed_work(&dsi->framedone_timeout_work);
+	cancel_delayed_work(&dsi->framedone_timeout_work);

 	dsi_handle_framedone(dsidev, 0);
 }

@@ -4863,8 +4863,8 @@ static int __init omap_dsihw_probe(struct platform_device *dsidev)
 	mutex_init(&dsi->lock);
 	sema_init(&dsi->bus_lock, 1);

-	INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
-			dsi_framedone_timeout_work_callback);
+	INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
+			dsi_framedone_timeout_work_callback);

 #ifdef DSI_CATCH_MISSING_TE
 	init_timer(&dsi->te_timer);
Some files were not shown because too many files have changed in this diff.