workqueue: consistently use int for @cpu variables

Workqueue is mixing unsigned int and int for @cpu variables.  There's
no point in using unsigned int for cpus - many of cpu related APIs
take int anyway.  Consistently use int for @cpu variables so that we
can use negative values to mark special ones.

This patch doesn't introduce any visible behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
This commit is contained in:
Tejun Heo 2013-03-12 11:29:59 -07:00
parent 493a1724fe
commit d84ff0512f
3 changed files with 16 additions and 19 deletions

View File

@@ -435,7 +435,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
extern void workqueue_set_max_active(struct workqueue_struct *wq, extern void workqueue_set_max_active(struct workqueue_struct *wq,
int max_active); int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq); extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work); extern unsigned int work_busy(struct work_struct *work);
/* /*
@@ -466,12 +466,12 @@ static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwo
} }
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{ {
return fn(arg); return fn(arg);
} }
#else #else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg); long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER #ifdef CONFIG_FREEZER

View File

@@ -124,7 +124,7 @@ enum {
struct worker_pool { struct worker_pool {
spinlock_t lock; /* the pool lock */ spinlock_t lock; /* the pool lock */
unsigned int cpu; /* I: the associated cpu */ int cpu; /* I: the associated cpu */
int id; /* I: pool ID */ int id; /* I: pool ID */
unsigned int flags; /* X: flags */ unsigned int flags; /* X: flags */
@@ -467,8 +467,7 @@ static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
return &pools[highpri]; return &pools[highpri];
} }
static struct pool_workqueue *get_pwq(unsigned int cpu, static struct pool_workqueue *get_pwq(int cpu, struct workqueue_struct *wq)
struct workqueue_struct *wq)
{ {
if (!(wq->flags & WQ_UNBOUND)) { if (!(wq->flags & WQ_UNBOUND)) {
if (likely(cpu < nr_cpu_ids)) if (likely(cpu < nr_cpu_ids))
@@ -730,7 +729,7 @@ static void wake_up_worker(struct worker_pool *pool)
* CONTEXT: * CONTEXT:
* spin_lock_irq(rq->lock) * spin_lock_irq(rq->lock)
*/ */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu) void wq_worker_waking_up(struct task_struct *task, int cpu)
{ {
struct worker *worker = kthread_data(task); struct worker *worker = kthread_data(task);
@@ -755,8 +754,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
* RETURNS: * RETURNS:
* Worker task on @cpu to wake up, %NULL if none. * Worker task on @cpu to wake up, %NULL if none.
*/ */
struct task_struct *wq_worker_sleeping(struct task_struct *task, struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
unsigned int cpu)
{ {
struct worker *worker = kthread_data(task), *to_wakeup = NULL; struct worker *worker = kthread_data(task), *to_wakeup = NULL;
struct worker_pool *pool; struct worker_pool *pool;
@@ -1159,7 +1157,7 @@ static bool is_chained_work(struct workqueue_struct *wq)
return worker && worker->current_pwq->wq == wq; return worker && worker->current_pwq->wq == wq;
} }
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, static void __queue_work(int cpu, struct workqueue_struct *wq,
struct work_struct *work) struct work_struct *work)
{ {
struct pool_workqueue *pwq; struct pool_workqueue *pwq;
@@ -1714,7 +1712,7 @@ static struct worker *create_worker(struct worker_pool *pool)
if (pool->cpu != WORK_CPU_UNBOUND) if (pool->cpu != WORK_CPU_UNBOUND)
worker->task = kthread_create_on_node(worker_thread, worker->task = kthread_create_on_node(worker_thread,
worker, cpu_to_node(pool->cpu), worker, cpu_to_node(pool->cpu),
"kworker/%u:%d%s", pool->cpu, id, pri); "kworker/%d:%d%s", pool->cpu, id, pri);
else else
worker->task = kthread_create(worker_thread, worker, worker->task = kthread_create(worker_thread, worker,
"kworker/u:%d%s", id, pri); "kworker/u:%d%s", id, pri);
@@ -3345,7 +3343,7 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
* RETURNS: * RETURNS:
* %true if congested, %false otherwise. * %true if congested, %false otherwise.
*/ */
bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq) bool workqueue_congested(int cpu, struct workqueue_struct *wq)
{ {
struct pool_workqueue *pwq = get_pwq(cpu, wq); struct pool_workqueue *pwq = get_pwq(cpu, wq);
@@ -3461,7 +3459,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
unsigned long action, unsigned long action,
void *hcpu) void *hcpu)
{ {
unsigned int cpu = (unsigned long)hcpu; int cpu = (unsigned long)hcpu;
struct worker_pool *pool; struct worker_pool *pool;
switch (action & ~CPU_TASKS_FROZEN) { switch (action & ~CPU_TASKS_FROZEN) {
@@ -3507,7 +3505,7 @@ static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
unsigned long action, unsigned long action,
void *hcpu) void *hcpu)
{ {
unsigned int cpu = (unsigned long)hcpu; int cpu = (unsigned long)hcpu;
struct work_struct unbind_work; struct work_struct unbind_work;
switch (action & ~CPU_TASKS_FROZEN) { switch (action & ~CPU_TASKS_FROZEN) {
@@ -3547,7 +3545,7 @@ static void work_for_cpu_fn(struct work_struct *work)
* It is up to the caller to ensure that the cpu doesn't go offline. * It is up to the caller to ensure that the cpu doesn't go offline.
* The caller must not hold any locks which would prevent @fn from completing. * The caller must not hold any locks which would prevent @fn from completing.
*/ */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{ {
struct work_for_cpu wfc = { .fn = fn, .arg = arg }; struct work_for_cpu wfc = { .fn = fn, .arg = arg };
@@ -3705,7 +3703,7 @@ out_unlock:
static int __init init_workqueues(void) static int __init init_workqueues(void)
{ {
unsigned int cpu; int cpu;
/* make sure we have enough bits for OFFQ pool ID */ /* make sure we have enough bits for OFFQ pool ID */
BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) < BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <

View File

@@ -59,8 +59,7 @@ static inline struct worker *current_wq_worker(void)
* Scheduler hooks for concurrency managed workqueue. Only to be used from * Scheduler hooks for concurrency managed workqueue. Only to be used from
* sched.c and workqueue.c. * sched.c and workqueue.c.
*/ */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu); void wq_worker_waking_up(struct task_struct *task, int cpu);
struct task_struct *wq_worker_sleeping(struct task_struct *task, struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);
unsigned int cpu);
#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */