workqueue: move struct worker definition to workqueue_internal.h
This will be used to implement an inline function to query whether %current is a workqueue worker and, if so, allow determining which work item it's executing.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
commit 2eaebdb33e
parent ea138446e5
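To make the motivation concrete, here is a minimal sketch of the kind of inline helper the message above describes. It is not part of this patch; the helper names (current_wq_worker(), current_work_item()) are illustrative, and the sketch assumes the standard kernel facts that workqueue worker tasks run with PF_WQ_WORKER set and carry their struct worker as kthread data:

/* Sketch only: with struct worker visible in workqueue_internal.h, a
 * header-only helper can map %current back to its worker and to the
 * work item it is currently executing. */
#include <linux/kthread.h>
#include <linux/sched.h>
#include "workqueue_internal.h"

static inline struct worker *current_wq_worker(void)
{
	/* Worker kthreads are created with their struct worker as kthread data. */
	if (current->flags & PF_WQ_WORKER)
		return kthread_data(current);
	return NULL;
}

static inline struct work_struct *current_work_item(void)
{
	struct worker *worker = current_wq_worker();

	return worker ? worker->current_work : NULL;
}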
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -122,37 +122,7 @@ enum {
  * W: workqueue_lock protected.
  */
 
-struct global_cwq;
-struct worker_pool;
-
-/*
- * The poor guys doing the actual heavy lifting.  All on-duty workers
- * are either serving the manager role, on idle list or on busy hash.
- */
-struct worker {
-	/* on idle list while idle, on busy hash table while busy */
-	union {
-		struct list_head	entry;		/* L: while idle */
-		struct hlist_node	hentry;		/* L: while busy */
-	};
-
-	struct work_struct	*current_work;	/* L: work being processed */
-	work_func_t		current_func;	/* L: current_work's fn */
-	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
-	struct list_head	scheduled;	/* L: scheduled works */
-	struct task_struct	*task;		/* I: worker task */
-	struct worker_pool	*pool;		/* I: the associated pool */
-	/* 64 bytes boundary on 64bit, 32 on 32bit */
-	unsigned long		last_active;	/* L: last active timestamp */
-	unsigned int		flags;		/* X: flags */
-	int			id;		/* I: worker id */
-
-	/* for rebinding worker to CPU */
-	struct work_struct	rebind_work;	/* L: for busy worker */
-
-	/* used only by rescuers to point to the target workqueue */
-	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
-};
+/* struct worker is defined in workqueue_internal.h */
 
 struct worker_pool {
 	struct global_cwq	*gcwq;		/* I: the owning gcwq */

--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -7,6 +7,43 @@
 #ifndef _KERNEL_WORKQUEUE_INTERNAL_H
 #define _KERNEL_WORKQUEUE_INTERNAL_H
 
+#include <linux/workqueue.h>
+
+struct global_cwq;
+struct worker_pool;
+
+/*
+ * The poor guys doing the actual heavy lifting.  All on-duty workers are
+ * either serving the manager role, on idle list or on busy hash.  For
+ * details on the locking annotation (L, I, X...), refer to workqueue.c.
+ *
+ * Only to be used in workqueue and async.
+ */
+struct worker {
+	/* on idle list while idle, on busy hash table while busy */
+	union {
+		struct list_head	entry;		/* L: while idle */
+		struct hlist_node	hentry;		/* L: while busy */
+	};
+
+	struct work_struct	*current_work;	/* L: work being processed */
+	work_func_t		current_func;	/* L: current_work's fn */
+	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
+	struct list_head	scheduled;	/* L: scheduled works */
+	struct task_struct	*task;		/* I: worker task */
+	struct worker_pool	*pool;		/* I: the associated pool */
+	/* 64 bytes boundary on 64bit, 32 on 32bit */
+	unsigned long		last_active;	/* L: last active timestamp */
+	unsigned int		flags;		/* X: flags */
+	int			id;		/* I: worker id */
+
+	/* for rebinding worker to CPU */
+	struct work_struct	rebind_work;	/* L: for busy worker */
+
+	/* used only by rescuers to point to the target workqueue */
+	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
+};
+
 /*
  * Scheduler hooks for concurrency managed workqueue.  Only to be used from
  * sched.c and workqueue.c.
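The "Only to be used in workqueue and async" note in the struct comment hints at the second intended consumer. As a hedged illustration (again not part of this patch; async_run_entry_fn here stands in for whatever work function the async machinery actually queues), async code could use the helper sketched above to tell whether %current is executing one of its entries:

static bool running_async_entry(void)
{
	struct worker *worker = current_wq_worker();

	/* An async task is simply a workqueue worker whose current work
	 * function is the async machinery's entry runner. */
	return worker && worker->current_func == async_run_entry_fn;
}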