genirq: Add fasteoi IPI flow
For irqchips using the fasteoi flow, IPIs are a bit special. They need to be EOI'd early (before calling the handler), as funny things may happen in the handler (they do not necessarily behave like a normal interrupt).

Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
parent
f4d51dffc6
commit
c5e5ec033c
|
@ -634,6 +634,7 @@ static inline int irq_set_parent(int irq, int parent_irq)
|
||||||
*/
|
*/
|
||||||
extern void handle_level_irq(struct irq_desc *desc);
|
extern void handle_level_irq(struct irq_desc *desc);
|
||||||
extern void handle_fasteoi_irq(struct irq_desc *desc);
|
extern void handle_fasteoi_irq(struct irq_desc *desc);
|
||||||
|
extern void handle_percpu_devid_fasteoi_ipi(struct irq_desc *desc);
|
||||||
extern void handle_edge_irq(struct irq_desc *desc);
|
extern void handle_edge_irq(struct irq_desc *desc);
|
||||||
extern void handle_edge_eoi_irq(struct irq_desc *desc);
|
extern void handle_edge_eoi_irq(struct irq_desc *desc);
|
||||||
extern void handle_simple_irq(struct irq_desc *desc);
|
extern void handle_simple_irq(struct irq_desc *desc);
|
||||||
|
|
|
@ -944,6 +944,33 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
|
||||||
chip->irq_eoi(&desc->irq_data);
|
chip->irq_eoi(&desc->irq_data);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* handle_percpu_devid_fasteoi_ipi - Per CPU local IPI handler with per cpu
|
||||||
|
* dev ids
|
||||||
|
* @desc: the interrupt description structure for this irq
|
||||||
|
*
|
||||||
|
* The biggest difference with the IRQ version is that the interrupt is
|
||||||
|
* EOIed early, as the IPI could result in a context switch, and we need to
|
||||||
|
* make sure the IPI can fire again. We also assume that the arch code has
|
||||||
|
* registered an action. If not, we are positively doomed.
|
||||||
|
*/
|
||||||
|
void handle_percpu_devid_fasteoi_ipi(struct irq_desc *desc)
|
||||||
|
{
|
||||||
|
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||||
|
struct irqaction *action = desc->action;
|
||||||
|
unsigned int irq = irq_desc_get_irq(desc);
|
||||||
|
irqreturn_t res;
|
||||||
|
|
||||||
|
__kstat_incr_irqs_this_cpu(desc);
|
||||||
|
|
||||||
|
if (chip->irq_eoi)
|
||||||
|
chip->irq_eoi(&desc->irq_data);
|
||||||
|
|
||||||
|
trace_irq_handler_entry(irq, action);
|
||||||
|
res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
|
||||||
|
trace_irq_handler_exit(irq, action, res);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
|
* handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
|
||||||
* dev ids
|
* dev ids
|
||||||
|
|
Loading…
Reference in New Issue