Merge branch 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue update from Tejun Heo:
 "Work items which may be involved in the memory reclaim path may be executed by the rescuer under memory pressure. When a rescuer gets activated, it processes whatever is on the pending list and then goes back to sleep until the manager kicks it again, which involves a 100ms delay.

  This is problematic for self-requeueing work items or ones running on ordered workqueues, as there is always only one work item on the pending list when the rescuer kicks in. The execution of that work item produces more to execute, but the rescuer won't see them until after the said 100ms has passed, so such workqueues would only execute one work item every 100ms under prolonged memory pressure, which BTW may be being prolonged due to the slow execution.

  Neil wrote up a patch which fixes this issue by keeping the rescuer working as long as the target workqueue is busy but doesn't have enough workers"

* 'for-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: allow rescuer thread to do more work.
  workqueue: invert the order between pool->lock and wq_mayday_lock
  workqueue: cosmetic update in rescuer_thread()
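For context, below is a minimal, hypothetical sketch of the self-requeueing pattern the pull message describes; the names reclaim_wq, reclaim_step and reclaim_work are illustrative only and are not part of this merge. Each execution of the work item queues its successor on the same ordered WQ_MEM_RECLAIM workqueue, so under the old behaviour the rescuer would drain only one item per MAYDAY_INTERVAL (100ms).

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/workqueue.h>

	/* Hypothetical self-requeueing work item; illustrative only. */
	static struct workqueue_struct *reclaim_wq;

	static void reclaim_step(struct work_struct *work);
	static DECLARE_WORK(reclaim_work, reclaim_step);

	static void reclaim_step(struct work_struct *work)
	{
		/* ... write back or free one chunk of memory ... */

		/*
		 * Chain the next step on the same ordered workqueue.
		 * Before this merge, a rescuer that had just executed
		 * this item went back to sleep and would not see the
		 * newly queued item until the next mayday, roughly
		 * MAYDAY_INTERVAL (100ms) later.
		 */
		queue_work(reclaim_wq, &reclaim_work);
	}

	static int __init reclaim_example_init(void)
	{
		/* WQ_MEM_RECLAIM gives this workqueue a dedicated rescuer. */
		reclaim_wq = alloc_ordered_workqueue("reclaim_wq", WQ_MEM_RECLAIM);
		if (!reclaim_wq)
			return -ENOMEM;

		queue_work(reclaim_wq, &reclaim_work);
		return 0;
	}

With the change pulled here, the rescuer puts the pwq back on the mayday list while the pool still needs workers, so a chain like the one above keeps draining back-to-back instead of advancing one item per 100ms.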
commit 0a27044c83
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
@@ -1804,8 +1804,8 @@ static void pool_mayday_timeout(unsigned long __pool)
 	struct worker_pool *pool = (void *)__pool;
 	struct work_struct *work;
 
-	spin_lock_irq(&wq_mayday_lock);		/* for wq->maydays */
-	spin_lock(&pool->lock);
+	spin_lock_irq(&pool->lock);
+	spin_lock(&wq_mayday_lock);		/* for wq->maydays */
 
 	if (need_to_create_worker(pool)) {
 		/*
@@ -1818,8 +1818,8 @@ static void pool_mayday_timeout(unsigned long __pool)
 			send_mayday(work);
 	}
 
-	spin_unlock(&pool->lock);
-	spin_unlock_irq(&wq_mayday_lock);
+	spin_unlock(&wq_mayday_lock);
+	spin_unlock_irq(&pool->lock);
 
 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
 }
@@ -2248,12 +2248,30 @@ repeat:
 		 * Slurp in all works issued via this workqueue and
 		 * process'em.
 		 */
-		WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
+		WARN_ON_ONCE(!list_empty(scheduled));
 		list_for_each_entry_safe(work, n, &pool->worklist, entry)
 			if (get_work_pwq(work) == pwq)
 				move_linked_works(work, scheduled, &n);
 
-		process_scheduled_works(rescuer);
+		if (!list_empty(scheduled)) {
+			process_scheduled_works(rescuer);
+
+			/*
+			 * The above execution of rescued work items could
+			 * have created more to rescue through
+			 * pwq_activate_first_delayed() or chained
+			 * queueing.  Let's put @pwq back on mayday list so
+			 * that such back-to-back work items, which may be
+			 * being used to relieve memory pressure, don't
+			 * incur MAYDAY_INTERVAL delay inbetween.
+			 */
+			if (need_to_create_worker(pool)) {
+				spin_lock(&wq_mayday_lock);
+				get_pwq(pwq);
+				list_move_tail(&pwq->mayday_node, &wq->maydays);
+				spin_unlock(&wq_mayday_lock);
+			}
+		}
 
 		/*
 		 * Put the reference grabbed by send_mayday().  @pool won't