#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H

#include <linux/llist.h>

/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING 1UL
#define IRQ_WORK_BUSY 2UL
#define IRQ_WORK_FLAGS 3UL
#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
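/*
 * Note (derived from the definitions above): IRQ_WORK_FLAGS is simply
 * IRQ_WORK_PENDING | IRQ_WORK_BUSY (3UL), the value a claimed entry
 * carries in the state table above; these flag values all live in the
 * flags field of struct irq_work below.
 */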
struct irq_work {
	unsigned long flags;
	struct llist_node llnode;
	void (*func)(struct irq_work *);
};

static inline
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
	work->flags = 0;
	work->func = func;
}

void irq_work_queue(struct irq_work *work);
void irq_work_run(void);
void irq_work_sync(struct irq_work *work);
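/*
 * Illustrative usage sketch for the helpers declared above (not part
 * of this header); the names my_work and my_irq_work_func are
 * hypothetical. An irq_work is typically initialised once, queued
 * from a restricted context such as NMI or hardirq, and its callback
 * then runs later from the irq_work interrupt (or from the tick for
 * lazy entries):
 *
 *	static struct irq_work my_work;
 *
 *	static void my_irq_work_func(struct irq_work *work)
 *	{
 *		pr_info("deferred work running in irq context\n");
 *	}
 *
 *	init_irq_work(&my_work, my_irq_work_func);
 *	irq_work_queue(&my_work);	queue it; the callback runs later
 *	irq_work_sync(&my_work);	wait until the entry is no longer busy
 */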
#ifdef CONFIG_IRQ_WORK
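/*
 * Reports whether this CPU still has irq_work pending. As an aside
 * (not stated by this header), the expected caller in this era is the
 * tick/nohz path, which must not stop the tick while lazy entries are
 * still waiting for it.
 */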
bool irq_work_needs_cpu(void);
#else
static inline bool irq_work_needs_cpu(void) { return false; }
#endif /* CONFIG_IRQ_WORK */

#endif /* _LINUX_IRQ_WORK_H */