workqueue: factor out start_flush_work()

Factor out start_flush_work() from flush_work().  start_flush_work()
takes a @wait_executing argument which controls whether the barrier is
queued only when the work item is pending or also when it is currently
executing.  As flush_work() needs to wait for execution too, it passes
%true.

This commit doesn't introduce any behavior change.  start_flush_work()
will be used to implement flush_work_sync().
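
To make the planned use concrete, here is a rough sketch of how
flush_work_sync() could sit on top of start_flush_work().  This is
hypothetical at this point (the actual implementation is left to a
follow-up commit) and assumes a wait_on_work()-style helper that waits
for all currently executing instances of @work:

static bool flush_work_sync(struct work_struct *work)
{
	struct wq_barrier barr;
	bool pending, waited;

	/* queue the barrier only if @work is still pending */
	pending = start_flush_work(work, &barr, false);

	/* wait for currently executing instances (assumed helper) */
	waited = wait_on_work(work);

	/* then wait for the queued barrier, if any */
	if (pending) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return pending || waited;
}

Because @wait_executing is %false there, the barrier is queued only
behind a pending instance; executing instances are waited for
separately.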

Signed-off-by: Tejun Heo <tj@kernel.org>
commit baf59022c3
parent 401a8d048e
Author: Tejun Heo <tj@kernel.org>
Date:   2010-09-16 10:42:16 +02:00

1 file changed, 48 insertions(+), 38 deletions(-)


@@ -2326,6 +2326,48 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+			     bool wait_executing)
+{
+	struct worker *worker = NULL;
+	struct global_cwq *gcwq;
+	struct cpu_workqueue_struct *cwq;
+
+	might_sleep();
+	gcwq = get_work_gcwq(work);
+	if (!gcwq)
+		return false;
+
+	spin_lock_irq(&gcwq->lock);
+	if (!list_empty(&work->entry)) {
+		/*
+		 * See the comment near try_to_grab_pending()->smp_rmb().
+		 * If it was re-queued to a different gcwq under us, we
+		 * are not going to wait.
+		 */
+		smp_rmb();
+		cwq = get_work_cwq(work);
+		if (unlikely(!cwq || gcwq != cwq->gcwq))
+			goto already_gone;
+	} else if (wait_executing) {
+		worker = find_worker_executing_work(gcwq, work);
+		if (!worker)
+			goto already_gone;
+		cwq = worker->current_cwq;
+	} else
+		goto already_gone;
+
+	insert_wq_barrier(cwq, barr, work, worker);
+	spin_unlock_irq(&gcwq->lock);
+
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
+
+	return true;
+already_gone:
+	spin_unlock_irq(&gcwq->lock);
+	return false;
+}
+
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2346,46 +2388,14 @@ EXPORT_SYMBOL_GPL(flush_workqueue);
  */
 bool flush_work(struct work_struct *work)
 {
-	struct worker *worker = NULL;
-	struct global_cwq *gcwq;
-	struct cpu_workqueue_struct *cwq;
 	struct wq_barrier barr;
 
-	might_sleep();
-	gcwq = get_work_gcwq(work);
-	if (!gcwq)
-		return 0;
-
-	spin_lock_irq(&gcwq->lock);
-	if (!list_empty(&work->entry)) {
-		/*
-		 * See the comment near try_to_grab_pending()->smp_rmb().
-		 * If it was re-queued to a different gcwq under us, we
-		 * are not going to wait.
-		 */
-		smp_rmb();
-		cwq = get_work_cwq(work);
-		if (unlikely(!cwq || gcwq != cwq->gcwq))
-			goto already_gone;
-	} else {
-		worker = find_worker_executing_work(gcwq, work);
-		if (!worker)
-			goto already_gone;
-		cwq = worker->current_cwq;
-	}
-
-	insert_wq_barrier(cwq, &barr, work, worker);
-	spin_unlock_irq(&gcwq->lock);
-
-	lock_map_acquire(&cwq->wq->lockdep_map);
-	lock_map_release(&cwq->wq->lockdep_map);
-
-	wait_for_completion(&barr.done);
-	destroy_work_on_stack(&barr.work);
-	return true;
-already_gone:
-	spin_unlock_irq(&gcwq->lock);
-	return false;
+	if (start_flush_work(work, &barr, true)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else
+		return false;
 }
 EXPORT_SYMBOL_GPL(flush_work);
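
For callers, the contract of flush_work() is unchanged by this
refactoring: it waits for the last queueing instance of a work item to
finish executing and returns %false if the work was already idle.  An
illustrative caller follows; my_work_fn(), my_work and
my_driver_remove() are hypothetical names invented for this example:

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* ... deferred processing ... */
}

static DECLARE_WORK(my_work, my_work_fn);

/* e.g. a driver teardown path */
static void my_driver_remove(void)
{
	/*
	 * Wait for the last queueing instance of my_work to finish
	 * executing.  flush_work() returns %false when the work item
	 * was neither pending nor executing.
	 */
	if (!flush_work(&my_work))
		pr_debug("my_work was already idle\n");
}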