2005-04-17 06:20:36 +08:00
|
|
|
#ifndef _LINUX_KTHREAD_H
|
|
|
|
#define _LINUX_KTHREAD_H
|
|
|
|
/* Simple interface for creating and stopping kernel threads without mess. */
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
|
2011-11-01 08:11:33 +08:00
|
|
|
__printf(4, 5)
|
2011-03-23 07:30:44 +08:00
|
|
|
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
|
|
|
|
void *data,
|
|
|
|
int node,
|
2011-11-01 08:11:33 +08:00
|
|
|
const char namefmt[], ...);
/**
 * kthread_create - create a kthread on the current node
 * @threadfn: the function to run in the thread
 * @data: data pointer for @threadfn()
 * @namefmt: printf-style format string for the thread name
 * @arg...: arguments for @namefmt.
 *
 * This macro will create a kthread on the current node, leaving it in
 * the stopped state.  This is just a helper for kthread_create_on_node();
 * see the documentation there for more details.
 */
#define kthread_create(threadfn, data, namefmt, arg...) \
	kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
/*
 * Create a kthread bound to @cpu; @namefmt should contain one %u which
 * is filled with the cpu number.  The thread is left stopped.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data,
					  unsigned int cpu,
					  const char *namefmt);
/**
 * kthread_run - create and wake a thread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: Convenient wrapper for kthread_create() followed by
 * wake_up_process().  Returns the kthread or ERR_PTR(-ENOMEM).
 */
#define kthread_run(threadfn, data, namefmt, ...)			   \
({									   \
	struct task_struct *__k						   \
		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
	if (!IS_ERR(__k))						   \
		wake_up_process(__k);					   \
	__k;								   \
})
|
|
|
|
|
2016-11-30 01:50:57 +08:00
|
|
|
void free_kthread_struct(struct task_struct *k);
|
2005-04-17 06:20:36 +08:00
|
|
|
void kthread_bind(struct task_struct *k, unsigned int cpu);
|
2015-05-15 23:43:34 +08:00
|
|
|
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
|
2005-04-17 06:20:36 +08:00
|
|
|
int kthread_stop(struct task_struct *k);
|
2012-07-16 18:42:36 +08:00
|
|
|
bool kthread_should_stop(void);
|
|
|
|
bool kthread_should_park(void);
|
2011-11-22 04:32:23 +08:00
|
|
|
bool kthread_freezable_should_stop(bool *was_frozen);
|
2010-06-29 16:07:09 +08:00
|
|
|
void *kthread_data(struct task_struct *k);
|
2016-10-12 04:55:17 +08:00
|
|
|
void *kthread_probe_data(struct task_struct *k);
|
2012-07-16 18:42:36 +08:00
|
|
|
int kthread_park(struct task_struct *k);
|
|
|
|
void kthread_unpark(struct task_struct *k);
|
|
|
|
void kthread_parkme(void);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-05-09 17:34:32 +08:00
|
|
|
int kthreadd(void *unused);
|
|
|
|
extern struct task_struct *kthreadd_task;
|
2011-03-23 07:30:44 +08:00
|
|
|
extern int tsk_fork_get_node(struct task_struct *tsk);
/*
 * Simple work processor based on kthread.
 *
 * This provides easier way to make use of kthreads.  A kthread_work
 * can be queued and flushed using queue/kthread_flush_work()
 * respectively.  Queued kthread_works are processed by a kthread
 * running kthread_worker_fn().
 */
struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);
void kthread_delayed_work_timer_fn(unsigned long __data);

enum {
	KTW_FREEZABLE		= 1 << 0,	/* freeze during suspend */
};
|
|
|
|
|
2010-06-29 16:07:09 +08:00
|
|
|
struct kthread_worker {
|
2016-10-12 04:55:50 +08:00
|
|
|
unsigned int flags;
|
2010-06-29 16:07:09 +08:00
|
|
|
spinlock_t lock;
|
|
|
|
struct list_head work_list;
|
2016-10-12 04:55:40 +08:00
|
|
|
struct list_head delayed_work_list;
|
2010-06-29 16:07:09 +08:00
|
|
|
struct task_struct *task;
|
2012-07-20 04:52:53 +08:00
|
|
|
struct kthread_work *current_work;
|
2010-06-29 16:07:09 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
struct kthread_work {
|
|
|
|
struct list_head node;
|
|
|
|
kthread_work_func_t func;
|
2012-07-20 04:52:53 +08:00
|
|
|
struct kthread_worker *worker;
|
kthread: allow to cancel kthread work
We are going to use kthread workers more widely and sometimes we will need
to make sure that the work is neither pending nor running.
This patch implements cancel_*_sync() operations as inspired by
workqueues. Well, we are synchronized against the other operations via
the worker lock, we use del_timer_sync() and a counter to count parallel
cancel operations. Therefore the implementation might be easier.
First, we check if a worker is assigned. If not, the work has newer been
queued after it was initialized.
Second, we take the worker lock. It must be the right one. The work must
not be assigned to another worker unless it is initialized in between.
Third, we try to cancel the timer when it exists. The timer is deleted
synchronously to make sure that the timer call back is not running. We
need to temporary release the worker->lock to avoid a possible deadlock
with the callback. In the meantime, we set work->canceling counter to
avoid any queuing.
Fourth, we try to remove the work from a worker list. It might be
the list of either normal or delayed works.
Fifth, if the work is running, we call kthread_flush_work(). It might
take an arbitrary time. We need to release the worker-lock again. In the
meantime, we again block any queuing by the canceling counter.
As already mentioned, the check for a pending kthread work is done under a
lock. In compare with workqueues, we do not need to fight for a single
PENDING bit to block other operations. Therefore we do not suffer from
the thundering storm problem and all parallel canceling jobs might use
kthread_flush_work(). Any queuing is blocked until the counter gets zero.
Link: http://lkml.kernel.org/r/1470754545-17632-10-git-send-email-pmladek@suse.com
Signed-off-by: Petr Mladek <pmladek@suse.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Borislav Petkov <bp@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-10-12 04:55:43 +08:00
|
|
|
/* Number of canceling calls that are running at the moment. */
|
|
|
|
int canceling;
|
2010-06-29 16:07:09 +08:00
|
|
|
};
|
|
|
|
|
2016-10-12 04:55:40 +08:00
|
|
|
struct kthread_delayed_work {
|
|
|
|
struct kthread_work work;
|
|
|
|
struct timer_list timer;
|
|
|
|
};
/* Static initializer for a kthread_worker. */
#define KTHREAD_WORKER_INIT(worker)	{				\
	.lock = __SPIN_LOCK_UNLOCKED((worker).lock),			\
	.work_list = LIST_HEAD_INIT((worker).work_list),		\
	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
	}
/* Static initializer for a kthread_work running @fn. */
#define KTHREAD_WORK_INIT(work, fn)	{				\
	.node = LIST_HEAD_INIT((work).node),				\
	.func = (fn),							\
	}
/* Static initializer for a kthread_delayed_work; the timer is IRQ-safe. */
#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) {				\
	.work = KTHREAD_WORK_INIT((dwork).work, (fn)),			\
	.timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,	\
				     (unsigned long)&(dwork),		\
				     TIMER_IRQSAFE),			\
	}
/* Define-and-initialize helpers for the three kthread worker objects. */
#define DEFINE_KTHREAD_WORKER(worker)					\
	struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)

#define DEFINE_KTHREAD_WORK(work, fn)					\
	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)

#define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn)				\
	struct kthread_delayed_work dwork =				\
		KTHREAD_DELAYED_WORK_INIT(dwork, fn)
/*
 * kthread_worker.lock needs its own lockdep class key when defined on
 * stack with lockdep enabled.  Use the following macros in such cases.
 */
#ifdef CONFIG_LOCKDEP
# define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
	({ kthread_init_worker(&worker); worker; })
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
#else
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
#endif
/* Runtime worker init; the static key gives each call site its own
 * lockdep class for the worker lock. */
extern void __kthread_init_worker(struct kthread_worker *worker,
			const char *name, struct lock_class_key *key);

#define kthread_init_worker(worker)					\
	do {								\
		static struct lock_class_key __key;			\
		__kthread_init_worker((worker), "("#worker")->lock", &__key); \
	} while (0)
/* Runtime init of a kthread_work: zero everything, then set node/func. */
#define kthread_init_work(work, fn)					\
	do {								\
		memset((work), 0, sizeof(struct kthread_work));		\
		INIT_LIST_HEAD(&(work)->node);				\
		(work)->func = (fn);					\
	} while (0)
/* Runtime init of a kthread_delayed_work: init the work, arm the timer
 * callback (IRQ-safe) with the dwork pointer as its data. */
#define kthread_init_delayed_work(dwork, fn)				\
	do {								\
		kthread_init_work(&(dwork)->work, (fn));		\
		__setup_timer(&(dwork)->timer,				\
			      kthread_delayed_work_timer_fn,		\
			      (unsigned long)(dwork),			\
			      TIMER_IRQSAFE);				\
	} while (0)
/* Main loop for a worker thread; @worker_ptr is a struct kthread_worker *. */
int kthread_worker_fn(void *worker_ptr);
|
|
|
|
|
2016-10-12 04:55:50 +08:00
|
|
|
__printf(2, 3)
|
kthread: add kthread_create_worker*()
Kthread workers are currently created using the classic kthread API,
namely kthread_run(). kthread_worker_fn() is passed as the @threadfn
parameter.
This patch defines kthread_create_worker() and
kthread_create_worker_on_cpu() functions that hide implementation details.
They enforce using kthread_worker_fn() for the main thread. But I doubt
that there are any plans to create any alternative. In fact, I think that
we do not want any alternative main thread because it would be hard to
support consistency with the rest of the kthread worker API.
The naming and function of kthread_create_worker() is inspired by the
workqueues API like the rest of the kthread worker API.
The kthread_create_worker_on_cpu() variant is motivated by the original
kthread_create_on_cpu(). Note that we need to bind per-CPU kthread
workers already when they are created. It makes the life easier.
kthread_bind() could not be used later for an already running worker.
This patch does _not_ convert existing kthread workers. The kthread
worker API need more improvements first, e.g. a function to destroy the
worker.
IMPORTANT:
kthread_create_worker_on_cpu() allows to use any format of the worker
name, in compare with kthread_create_on_cpu(). The good thing is that it
is more generic. The bad thing is that most users will need to pass the
cpu number in two parameters, e.g. kthread_create_worker_on_cpu(cpu,
"helper/%d", cpu).
To be honest, the main motivation was to avoid the need for an empty
va_list. The only legal way was to create a helper function that would be
called with an empty list. Other attempts caused compilation warnings or
even errors on different architectures.
There were also other alternatives, for example, using #define or
splitting __kthread_create_worker(). The used solution looked like the
least ugly.
Link: http://lkml.kernel.org/r/1470754545-17632-6-git-send-email-pmladek@suse.com
Signed-off-by: Petr Mladek <pmladek@suse.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Borislav Petkov <bp@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-10-12 04:55:30 +08:00
|
|
|
struct kthread_worker *
|
2016-10-12 04:55:50 +08:00
|
|
|
kthread_create_worker(unsigned int flags, const char namefmt[], ...);
|
kthread: add kthread_create_worker*()
Kthread workers are currently created using the classic kthread API,
namely kthread_run(). kthread_worker_fn() is passed as the @threadfn
parameter.
This patch defines kthread_create_worker() and
kthread_create_worker_on_cpu() functions that hide implementation details.
They enforce using kthread_worker_fn() for the main thread. But I doubt
that there are any plans to create any alternative. In fact, I think that
we do not want any alternative main thread because it would be hard to
support consistency with the rest of the kthread worker API.
The naming and function of kthread_create_worker() is inspired by the
workqueues API like the rest of the kthread worker API.
The kthread_create_worker_on_cpu() variant is motivated by the original
kthread_create_on_cpu(). Note that we need to bind per-CPU kthread
workers already when they are created. It makes the life easier.
kthread_bind() could not be used later for an already running worker.
This patch does _not_ convert existing kthread workers. The kthread
worker API need more improvements first, e.g. a function to destroy the
worker.
IMPORTANT:
kthread_create_worker_on_cpu() allows to use any format of the worker
name, in compare with kthread_create_on_cpu(). The good thing is that it
is more generic. The bad thing is that most users will need to pass the
cpu number in two parameters, e.g. kthread_create_worker_on_cpu(cpu,
"helper/%d", cpu).
To be honest, the main motivation was to avoid the need for an empty
va_list. The only legal way was to create a helper function that would be
called with an empty list. Other attempts caused compilation warnings or
even errors on different architectures.
There were also other alternatives, for example, using #define or
splitting __kthread_create_worker(). The used solution looked like the
least ugly.
Link: http://lkml.kernel.org/r/1470754545-17632-6-git-send-email-pmladek@suse.com
Signed-off-by: Petr Mladek <pmladek@suse.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Borislav Petkov <bp@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-10-12 04:55:30 +08:00
|
|
|
|
2016-12-13 08:40:39 +08:00
|
|
|
__printf(3, 4) struct kthread_worker *
|
2016-10-12 04:55:50 +08:00
|
|
|
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
|
|
|
|
const char namefmt[], ...);
|
kthread: add kthread_create_worker*()
Kthread workers are currently created using the classic kthread API,
namely kthread_run(). kthread_worker_fn() is passed as the @threadfn
parameter.
This patch defines kthread_create_worker() and
kthread_create_worker_on_cpu() functions that hide implementation details.
They enforce using kthread_worker_fn() for the main thread. But I doubt
that there are any plans to create any alternative. In fact, I think that
we do not want any alternative main thread because it would be hard to
support consistency with the rest of the kthread worker API.
The naming and function of kthread_create_worker() is inspired by the
workqueues API like the rest of the kthread worker API.
The kthread_create_worker_on_cpu() variant is motivated by the original
kthread_create_on_cpu(). Note that we need to bind per-CPU kthread
workers already when they are created. It makes the life easier.
kthread_bind() could not be used later for an already running worker.
This patch does _not_ convert existing kthread workers. The kthread
worker API need more improvements first, e.g. a function to destroy the
worker.
IMPORTANT:
kthread_create_worker_on_cpu() allows to use any format of the worker
name, in compare with kthread_create_on_cpu(). The good thing is that it
is more generic. The bad thing is that most users will need to pass the
cpu number in two parameters, e.g. kthread_create_worker_on_cpu(cpu,
"helper/%d", cpu).
To be honest, the main motivation was to avoid the need for an empty
va_list. The only legal way was to create a helper function that would be
called with an empty list. Other attempts caused compilation warnings or
even errors on different architectures.
There were also other alternatives, for example, using #define or
splitting __kthread_create_worker(). The used solution looked like the
least ugly.
Link: http://lkml.kernel.org/r/1470754545-17632-6-git-send-email-pmladek@suse.com
Signed-off-by: Petr Mladek <pmladek@suse.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Borislav Petkov <bp@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-10-12 04:55:30 +08:00
|
|
|
|
2016-10-12 04:55:20 +08:00
|
|
|
bool kthread_queue_work(struct kthread_worker *worker,
|
2010-06-29 16:07:09 +08:00
|
|
|
struct kthread_work *work);
|
2016-10-12 04:55:40 +08:00
|
|
|
|
|
|
|
bool kthread_queue_delayed_work(struct kthread_worker *worker,
|
|
|
|
struct kthread_delayed_work *dwork,
|
|
|
|
unsigned long delay);
|
|
|
|
|
2016-10-12 04:55:46 +08:00
|
|
|
bool kthread_mod_delayed_work(struct kthread_worker *worker,
|
|
|
|
struct kthread_delayed_work *dwork,
|
|
|
|
unsigned long delay);
|
|
|
|
|
2016-10-12 04:55:20 +08:00
|
|
|
void kthread_flush_work(struct kthread_work *work);
|
|
|
|
void kthread_flush_worker(struct kthread_worker *worker);
|
2010-06-29 16:07:09 +08:00
|
|
|
|
kthread: allow to cancel kthread work
We are going to use kthread workers more widely and sometimes we will need
to make sure that the work is neither pending nor running.
This patch implements cancel_*_sync() operations as inspired by
workqueues. Well, we are synchronized against the other operations via
the worker lock, we use del_timer_sync() and a counter to count parallel
cancel operations. Therefore the implementation might be easier.
First, we check if a worker is assigned. If not, the work has newer been
queued after it was initialized.
Second, we take the worker lock. It must be the right one. The work must
not be assigned to another worker unless it is initialized in between.
Third, we try to cancel the timer when it exists. The timer is deleted
synchronously to make sure that the timer call back is not running. We
need to temporary release the worker->lock to avoid a possible deadlock
with the callback. In the meantime, we set work->canceling counter to
avoid any queuing.
Fourth, we try to remove the work from a worker list. It might be
the list of either normal or delayed works.
Fifth, if the work is running, we call kthread_flush_work(). It might
take an arbitrary time. We need to release the worker-lock again. In the
meantime, we again block any queuing by the canceling counter.
As already mentioned, the check for a pending kthread work is done under a
lock. In compare with workqueues, we do not need to fight for a single
PENDING bit to block other operations. Therefore we do not suffer from
the thundering storm problem and all parallel canceling jobs might use
kthread_flush_work(). Any queuing is blocked until the counter gets zero.
Link: http://lkml.kernel.org/r/1470754545-17632-10-git-send-email-pmladek@suse.com
Signed-off-by: Petr Mladek <pmladek@suse.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Borislav Petkov <bp@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-10-12 04:55:43 +08:00
|
|
|
bool kthread_cancel_work_sync(struct kthread_work *work);
|
|
|
|
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
|
|
|
|
|
2016-10-12 04:55:33 +08:00
|
|
|
void kthread_destroy_worker(struct kthread_worker *worker);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif /* _LINUX_KTHREAD_H */
|