#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H

#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/mutex.h>

/*
 * Core internal functions to deal with irq descriptors
 */

struct irq_affinity_notify;
struct proc_dir_entry;
struct module;
struct irq_desc;
struct irq_domain;
struct pt_regs;

/**
 * struct irq_desc - interrupt descriptor
 * @irq_common_data:	per irq and chip data passed down to chip functions
 * @kstat_irqs:		irq stats per cpu
 * @handle_irq:		highlevel irq-events handler
 * @preflow_handler:	handler called before the flow handler (currently used by sparc)
 * @action:		the irq action chain
 * @status:		status information
 * @core_internal_state__do_not_mess_with_it: core internal status information
 * @depth:		disable-depth, for nested irq_disable() calls
 * @wake_depth:		enable depth, for multiple irq_set_irq_wake() callers
 * @irq_count:		stats field to detect stalled irqs
 * @last_unhandled:	aging timer for unhandled count
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
 * @threads_handled:	stats field for deferred spurious detection of threaded handlers
 * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
 * @lock:		locking for SMP
 * @affinity_hint:	hint to user space for preferred irq affinity
 * @affinity_notify:	context for notification of affinity changes
 * @pending_mask:	pending rebalanced interrupts
 * @threads_oneshot:	bitfield to handle shared oneshot threads
 * @threads_active:	number of irqaction threads currently running
 * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
 * @nr_actions:		number of installed actions on this descriptor
 * @no_suspend_depth:	number of irqactions on an irq descriptor with
 *			IRQF_NO_SUSPEND set
 * @force_resume_depth:	number of irqactions on an irq descriptor with
 *			IRQF_FORCE_RESUME set
 * @rcu:		rcu head for delayed free
 * @kobj:		kobject used to represent this struct in sysfs
 * @request_mutex:	mutex to protect request/free before locking desc->lock
 * @dir:		/proc/irq/ procfs entry
 * @debugfs_file:	dentry for the debugfs file
 * @name:		flow handler name for /proc/interrupts output
 */
struct irq_desc {
	struct irq_common_data	irq_common_data;
	struct irq_data		irq_data;
	unsigned int __percpu	*kstat_irqs;
	irq_flow_handler_t	handle_irq;
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
	irq_preflow_handler_t	preflow_handler;
#endif
	struct irqaction	*action;	/* IRQ action list */
	unsigned int		status_use_accessors;
	unsigned int		core_internal_state__do_not_mess_with_it;
	unsigned int		depth;		/* nested irq disables */
	unsigned int		wake_depth;	/* nested wake enables */
	unsigned int		irq_count;	/* For detecting broken IRQs */
	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
	unsigned int		irqs_unhandled;
	atomic_t		threads_handled;
	int			threads_handled_last;
	raw_spinlock_t		lock;
	struct cpumask		*percpu_enabled;
	const struct cpumask	*percpu_affinity;
#ifdef CONFIG_SMP
	const struct cpumask	*affinity_hint;
	struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_var_t		pending_mask;
#endif
#endif
	unsigned long		threads_oneshot;
	atomic_t		threads_active;
	wait_queue_head_t	wait_for_threads;
#ifdef CONFIG_PM_SLEEP
	unsigned int		nr_actions;
	unsigned int		no_suspend_depth;
	unsigned int		cond_suspend_depth;
	unsigned int		force_resume_depth;
#endif
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	struct dentry		*debugfs_file;
#endif
#ifdef CONFIG_SPARSE_IRQ
	struct rcu_head		rcu;
	struct kobject		kobj;
#endif
	struct mutex		request_mutex;
	int			parent_irq;
	struct module		*owner;
	const char		*name;
} ____cacheline_internodealigned_in_smp;

#ifdef CONFIG_SPARSE_IRQ
extern void irq_lock_sparse(void);
extern void irq_unlock_sparse(void);
#else
static inline void irq_lock_sparse(void) { }
static inline void irq_unlock_sparse(void) { }
extern struct irq_desc irq_desc[NR_IRQS];
#endif

static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
	return container_of(data->common, struct irq_desc, irq_common_data);
}

static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
{
	return desc->irq_data.irq;
}

static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
	return &desc->irq_data;
}

static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
{
	return desc->irq_data.chip;
}

static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
{
	return desc->irq_data.chip_data;
}

static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
	return desc->irq_common_data.handler_data;
}

static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
{
	return desc->irq_common_data.msi_desc;
}

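/*
 * Example (illustrative sketch, not part of this header): a flow
 * handler normally reaches chip and handler data through the accessors
 * above instead of dereferencing the descriptor directly.
 * "my_handle_irq" and "struct my_priv" are hypothetical names.
 *
 *	static void my_handle_irq(struct irq_desc *desc)
 *	{
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		struct my_priv *priv = irq_desc_get_handler_data(desc);
 *
 *		chip->irq_ack(irq_desc_get_irq_data(desc));
 *		... device specific handling using priv ...
 *	}
 */
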
/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt.
 */
static inline void generic_handle_irq_desc(struct irq_desc *desc)
{
	desc->handle_irq(desc);
}

int generic_handle_irq(unsigned int irq);

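/*
 * Example (sketch of a hypothetical demultiplexing handler): a chained
 * handler for a secondary interrupt controller resolves each pending
 * child hwirq and hands the Linux irq number back to the core via
 * generic_handle_irq(). "my_domain" and my_read_pending() are
 * assumptions; chained_irq_enter/exit() and irq_find_mapping() come
 * from <linux/irqchip/chained_irq.h> and <linux/irqdomain.h>.
 *
 *	static void my_demux_handler(struct irq_desc *desc)
 *	{
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending = my_read_pending();
 *		unsigned int hwirq;
 *
 *		chained_irq_enter(chip, desc);
 *		for_each_set_bit(hwirq, &pending, 32)
 *			generic_handle_irq(irq_find_mapping(my_domain, hwirq));
 *		chained_irq_exit(chip, desc);
 *	}
 */
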
#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/*
 * Convert a HW interrupt number to a logical one using an IRQ domain,
 * and handle the resulting interrupt number. Returns -EINVAL if the
 * conversion failed. Providing a NULL domain indicates that the
 * conversion has already been done.
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs);

static inline int handle_domain_irq(struct irq_domain *domain,
				    unsigned int hwirq, struct pt_regs *regs)
{
	return __handle_domain_irq(domain, hwirq, true, regs);
}
#endif

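/*
 * Example (sketch): the low-level entry point of a root interrupt
 * controller driver passes the hardware interrupt number straight to
 * handle_domain_irq(), which performs the domain lookup and runs the
 * flow handler inside irq_enter()/irq_exit(). "my_domain" and
 * my_ack_register_read() are hypothetical.
 *
 *	static void __exception_irq_entry my_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 hwirq = my_ack_register_read();
 *
 *		handle_domain_irq(my_domain, hwirq, regs);
 *	}
 */
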
/* Test to see if a driver has successfully requested an irq */
static inline int irq_desc_has_action(struct irq_desc *desc)
{
	return desc->action != NULL;
}

static inline int irq_has_action(unsigned int irq)
{
	return irq_desc_has_action(irq_to_desc(irq));
}

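/*
 * Example (sketch): a quick way for core or arch code to check whether
 * an interrupt line has been claimed before acting on it:
 *
 *	if (irq_has_action(irq))
 *		pr_info("irq %u already claimed\n", irq);
 */
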
/**
 * irq_set_handler_locked - Set irq handler from a locked region
 * @data:	Pointer to the irq_data structure which identifies the irq
 * @handler:	Flow control handler function for this interrupt
 *
 * Sets the handler in the irq descriptor associated with @data.
 *
 * Must be called with irq_desc locked and valid parameters. Typical
 * call site is the irq_set_type() callback.
 */
static inline void irq_set_handler_locked(struct irq_data *data,
					  irq_flow_handler_t handler)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	desc->handle_irq = handler;
}

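/*
 * Example (sketch, hypothetical irqchip driver): the typical caller is
 * an irq_set_type() callback, which switches the flow handler to match
 * the newly programmed trigger while desc->lock is already held.
 * my_program_trigger() is an assumption.
 *
 *	static int my_irq_set_type(struct irq_data *data, unsigned int type)
 *	{
 *		my_program_trigger(data, type);
 *		if (type & IRQ_TYPE_LEVEL_MASK)
 *			irq_set_handler_locked(data, handle_level_irq);
 *		else
 *			irq_set_handler_locked(data, handle_edge_irq);
 *		return 0;
 *	}
 */
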
/**
 * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region
 * @data:	Pointer to the irq_data structure for which the chip is set
 * @chip:	Pointer to the new irq chip
 * @handler:	Flow control handler function for this interrupt
 * @name:	Name of the interrupt
 *
 * Replaces the irq chip at the proper hierarchy level in @data and
 * sets the handler and name in the associated irq descriptor.
 *
 * Must be called with irq_desc locked and valid parameters.
 */
static inline void
irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
				 irq_flow_handler_t handler, const char *name)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	desc->handle_irq = handler;
	desc->name = name;
	data->chip = chip;
}

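/*
 * Example (sketch): when changing the trigger type also requires a
 * different irq chip at this hierarchy level, a set_type callback can
 * swap chip, handler and name in one go ("my_edge_chip" is
 * hypothetical):
 *
 *	irq_set_chip_handler_name_locked(data, &my_edge_chip,
 *					 handle_edge_irq, "edge");
 */
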
static inline int irq_balancing_disabled(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
}

static inline int irq_is_percpu(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	return desc->status_use_accessors & IRQ_PER_CPU;
}

static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc)
		lockdep_set_class(&desc->lock, class);
}

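/*
 * Example (sketch): drivers whose interrupts are requested from within
 * another interrupt's handling path (gpio expanders are the classic
 * case) give the nested irqs their own lockdep class so lockdep does
 * not flag false recursive locking on desc->lock:
 *
 *	static struct lock_class_key my_gpio_lock_class;
 *	...
 *	irq_set_lockdep_class(irq, &my_gpio_lock_class);
 */
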
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void
__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	desc->preflow_handler = handler;
}
#endif

#endif