workqueue: add workqueue_struct->maydays list to replace mayday cpu iterators
Similar to how pool_workqueue iteration used to be, raising and servicing
mayday requests is based on CPU numbers.  It's hairy because cpumask_t may
not be able to handle WORK_CPU_UNBOUND and cpumasks are assumed to be
always set on UP.  This is ugly and can't handle multiple unbound pools to
be added for unbound workqueues w/ custom attributes.

Add workqueue_struct->maydays.  When a pool_workqueue needs rescuing, it
gets chained on the list through pool_workqueue->mayday_node and
rescuer_thread() consumes the list until it's empty.

This patch doesn't introduce any visible behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
commit 493a1724fe
parent 24b8a84718
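Before the diff itself, here is a minimal, hypothetical userspace sketch of the list-based scheme the message describes: send_mayday() chains a pool_workqueue onto wq->maydays through its mayday_node unless it is already queued, and the rescuer pops entries until the list is empty.  The simplified list_head helpers, the rescue_all()/main() driver, and the pwq "name" field are stand-ins invented for illustration; the real code uses <linux/list.h> and runs these paths under workqueue_lock with IRQs disabled, both omitted here.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* tiny stand-in for the kernel's intrusive doubly-linked list */
struct list_head {
	struct list_head *prev, *next;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->prev = h->next = h;
}

static bool list_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_add_tail(struct list_head *node, struct list_head *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

static void list_del_init(struct list_head *node)
{
	node->prev->next = node->next;
	node->next->prev = node->prev;
	INIT_LIST_HEAD(node);	/* re-initialize so list_empty() means "not queued" */
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pool_workqueue {
	const char	 *name;			/* illustration only */
	struct list_head mayday_node;		/* node on wq->maydays */
};

struct workqueue_struct {
	struct list_head maydays;		/* pwqs requesting rescue */
};

/* raise a mayday: queue @pwq unless it is already on the list */
static void send_mayday(struct workqueue_struct *wq, struct pool_workqueue *pwq)
{
	if (list_empty(&pwq->mayday_node))
		list_add_tail(&pwq->mayday_node, &wq->maydays);
}

/* service maydays the way rescuer_thread() does: pop entries until empty */
static void rescue_all(struct workqueue_struct *wq)
{
	while (!list_empty(&wq->maydays)) {
		struct pool_workqueue *pwq = container_of(wq->maydays.next,
					struct pool_workqueue, mayday_node);

		list_del_init(&pwq->mayday_node);
		printf("rescuing %s\n", pwq->name);
	}
}

int main(void)
{
	struct workqueue_struct wq;
	struct pool_workqueue a = { .name = "pwq-a" };
	struct pool_workqueue b = { .name = "pwq-b" };

	INIT_LIST_HEAD(&wq.maydays);
	INIT_LIST_HEAD(&a.mayday_node);
	INIT_LIST_HEAD(&b.mayday_node);

	send_mayday(&wq, &a);
	send_mayday(&wq, &b);
	send_mayday(&wq, &a);	/* duplicate request is a no-op: already queued */

	rescue_all(&wq);	/* prints pwq-a, then pwq-b */
	return 0;
}

Because list_del_init() re-points the node at itself, list_empty(&pwq->mayday_node) doubles as the "already queued?" test, which is what lets the patch drop the per-CPU mayday mask entirely.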
@@ -170,6 +170,7 @@ struct pool_workqueue {
 	int			max_active;	/* L: max active works */
 	struct list_head	delayed_works;	/* L: delayed works */
 	struct list_head	pwqs_node;	/* I: node on wq->pwqs */
+	struct list_head	mayday_node;	/* W: node on wq->maydays */
 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
 
 /*
@@ -181,27 +182,6 @@ struct wq_flusher {
 	struct completion	done;		/* flush completion */
 };
 
-/*
- * All cpumasks are assumed to be always set on UP and thus can't be
- * used to determine whether there's something to be done.
- */
-#ifdef CONFIG_SMP
-typedef cpumask_var_t mayday_mask_t;
-#define mayday_test_and_set_cpu(cpu, mask)	\
-	cpumask_test_and_set_cpu((cpu), (mask))
-#define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
-#define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp)		zalloc_cpumask_var((maskp), (gfp))
-#define free_mayday_mask(mask)			free_cpumask_var((mask))
-#else
-typedef unsigned long mayday_mask_t;
-#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
-#define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
-#define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
-#define alloc_mayday_mask(maskp, gfp)		true
-#define free_mayday_mask(mask)			do { } while (0)
-#endif
-
 /*
  * The externally visible workqueue abstraction is an array of
  * per-CPU workqueues:
@@ -224,7 +204,7 @@ struct workqueue_struct {
 	struct list_head	flusher_queue;	/* F: flush waiters */
 	struct list_head	flusher_overflow; /* F: flush overflow list */
 
-	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
+	struct list_head	maydays;	/* W: pwqs requesting rescue */
 	struct worker		*rescuer;	/* I: rescue worker */
 
 	int			nr_drainers;	/* W: drain in progress */
@@ -1850,23 +1830,21 @@ static void idle_worker_timeout(unsigned long __pool)
 	spin_unlock_irq(&pool->lock);
 }
 
-static bool send_mayday(struct work_struct *work)
+static void send_mayday(struct work_struct *work)
 {
 	struct pool_workqueue *pwq = get_work_pwq(work);
 	struct workqueue_struct *wq = pwq->wq;
-	unsigned int cpu;
+
+	lockdep_assert_held(&workqueue_lock);
 
 	if (!(wq->flags & WQ_RESCUER))
-		return false;
+		return;
 
 	/* mayday mayday mayday */
-	cpu = pwq->pool->cpu;
-	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
-	if (cpu == WORK_CPU_UNBOUND)
-		cpu = 0;
-	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
+	if (list_empty(&pwq->mayday_node)) {
+		list_add_tail(&pwq->mayday_node, &wq->maydays);
 		wake_up_process(wq->rescuer->task);
-	return true;
+	}
 }
 
 static void pool_mayday_timeout(unsigned long __pool)
@@ -1874,7 +1852,8 @@ static void pool_mayday_timeout(unsigned long __pool)
 	struct worker_pool *pool = (void *)__pool;
 	struct work_struct *work;
 
-	spin_lock_irq(&pool->lock);
+	spin_lock_irq(&workqueue_lock);		/* for wq->maydays */
+	spin_lock(&pool->lock);
 
 	if (need_to_create_worker(pool)) {
 		/*
@@ -1887,7 +1866,8 @@ static void pool_mayday_timeout(unsigned long __pool)
 		send_mayday(work);
 	}
 
-	spin_unlock_irq(&pool->lock);
+	spin_unlock(&pool->lock);
+	spin_unlock_irq(&workqueue_lock);
 
 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
 }
@@ -2336,8 +2316,6 @@ static int rescuer_thread(void *__rescuer)
 	struct worker *rescuer = __rescuer;
 	struct workqueue_struct *wq = rescuer->rescue_wq;
 	struct list_head *scheduled = &rescuer->scheduled;
-	bool is_unbound = wq->flags & WQ_UNBOUND;
-	unsigned int cpu;
 
 	set_user_nice(current, RESCUER_NICE_LEVEL);
 
@@ -2355,18 +2333,19 @@ repeat:
 		return 0;
 	}
 
-	/*
-	 * See whether any cpu is asking for help.  Unbounded
-	 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
-	 */
-	for_each_mayday_cpu(cpu, wq->mayday_mask) {
-		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
-		struct pool_workqueue *pwq = get_pwq(tcpu, wq);
+	/* see whether any pwq is asking for help */
+	spin_lock_irq(&workqueue_lock);
+
+	while (!list_empty(&wq->maydays)) {
+		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
+					struct pool_workqueue, mayday_node);
 		struct worker_pool *pool = pwq->pool;
 		struct work_struct *work, *n;
 
 		__set_current_state(TASK_RUNNING);
-		mayday_clear_cpu(cpu, wq->mayday_mask);
+		list_del_init(&pwq->mayday_node);
+
+		spin_unlock_irq(&workqueue_lock);
 
 		/* migrate to the target cpu if possible */
 		worker_maybe_bind_and_lock(pool);
@@ -2392,9 +2371,12 @@ repeat:
 			wake_up_worker(pool);
 
 		rescuer->pool = NULL;
-		spin_unlock_irq(&pool->lock);
+		spin_unlock(&pool->lock);
+		spin_lock(&workqueue_lock);
 	}
 
+	spin_unlock_irq(&workqueue_lock);
+
 	/* rescuers should never participate in concurrency management */
 	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
 	schedule();
@@ -3192,6 +3174,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	INIT_LIST_HEAD(&wq->pwqs);
 	INIT_LIST_HEAD(&wq->flusher_queue);
 	INIT_LIST_HEAD(&wq->flusher_overflow);
+	INIT_LIST_HEAD(&wq->maydays);
 
 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
 	INIT_LIST_HEAD(&wq->list);
@@ -3205,14 +3188,12 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		pwq->flush_color = -1;
 		pwq->max_active = max_active;
 		INIT_LIST_HEAD(&pwq->delayed_works);
+		INIT_LIST_HEAD(&pwq->mayday_node);
 	}
 
 	if (flags & WQ_RESCUER) {
 		struct worker *rescuer;
 
-		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
-			goto err;
-
 		wq->rescuer = rescuer = alloc_worker();
 		if (!rescuer)
 			goto err;
@@ -3246,7 +3227,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 err:
 	if (wq) {
 		free_pwqs(wq);
-		free_mayday_mask(wq->mayday_mask);
 		kfree(wq->rescuer);
 		kfree(wq);
 	}
@@ -3289,7 +3269,6 @@ void destroy_workqueue(struct workqueue_struct *wq)
 
 	if (wq->flags & WQ_RESCUER) {
 		kthread_stop(wq->rescuer->task);
-		free_mayday_mask(wq->mayday_mask);
 		kfree(wq->rescuer);
 	}
 