ppc64: remove ppc_irq_dispatch_handler
Use __do_IRQ instead.  The only difference is that every controller is
now assumed to have an end() routine (only xics_8259 did not).

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
parent 5a7b3ff467
commit d4be4f37d9
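As a rough illustration of the end() assumption described in the commit message, here is a minimal, self-contained user-space C sketch; the fake_pic struct and helper names are invented for illustration and are not the real irq_desc/hw_interrupt_type definitions. The removed arch-private dispatcher fell back to ->enable() when a controller left ->end() unset, whereas the generic __do_IRQ flow finishes every interrupt through ->end(), which is why the patch copies the i8259 end() routine into xics_8259_pic in one of the hunks below.

/*
 * Minimal user-space sketch (not kernel code): models the difference
 * between the removed arch-private completion logic and the generic
 * __do_IRQ-style completion.  "fake_pic" and the helpers below are
 * invented stand-ins, not real kernel definitions.
 */
#include <stdio.h>

struct fake_pic {
	const char *name;
	void (*enable)(unsigned int irq);
	void (*end)(unsigned int irq);
};

static void i8259_enable(unsigned int irq)
{
	printf("i8259 enable(%u)\n", irq);
}

static void i8259_end(unsigned int irq)
{
	printf("i8259 end(%u): send EOI\n", irq);
}

/* Old-style completion: tolerated a missing end() by falling back to
 * enable(), so xics_8259 could get away without an end() routine. */
static void old_style_finish(struct fake_pic *pic, unsigned int irq)
{
	if (pic->end)
		pic->end(irq);
	else if (pic->enable)
		pic->enable(irq);
}

/* Generic-style completion: assumes end() is always present, as
 * __do_IRQ does, so every controller must now provide one. */
static void generic_style_finish(struct fake_pic *pic, unsigned int irq)
{
	pic->end(irq);		/* NULL here would crash */
}

int main(void)
{
	struct fake_pic xics_8259 = { "xics_8259", i8259_enable, NULL };

	old_style_finish(&xics_8259, 9);	/* falls back to enable() */

	/* Equivalent of the patch's "xics_8259_pic.end = i8259_pic.end": */
	xics_8259.end = i8259_end;
	generic_style_finish(&xics_8259, 9);	/* now safe */

	return 0;
}

With the end() routine filled in, either completion path issues the EOI; without it, the generic path would call through a NULL pointer, which is what the xics_8259 hunk in the diff prevents.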
@@ -89,12 +89,12 @@ _GLOBAL(call_do_softirq)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_ppc_irq_dispatch_handler)
+_GLOBAL(call___do_IRQ)
 	mflr	r0
 	std	r0,16(r1)
 	stdu	r1,THREAD_SIZE-112(r5)
 	mr	r1,r5
-	bl	.ppc_irq_dispatch_handler
+	bl	.__do_IRQ
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0

@@ -120,13 +120,13 @@ static void intReceived(struct XmPciLpEvent *eventParm,
 		if (curtp != irqtp) {
 			irqtp->task = curtp->task;
 			irqtp->flags = 0;
-			call_ppc_irq_dispatch_handler(regsParm, irq, irqtp);
+			call___do_IRQ(irq, regsParm, irqtp);
 			irqtp->task = NULL;
 			if (irqtp->flags)
 				set_bits(irqtp->flags, &curtp->flags);
 		} else
 #endif
-			ppc_irq_dispatch_handler(regsParm, irq);
+			__do_IRQ(irq, regsParm);
 		HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber,
 				eventParm->eventData.slotInterrupt.subBusNumber,
 				eventParm->eventData.slotInterrupt.deviceId);

@@ -326,10 +326,8 @@ static void iSeries_disable_IRQ(unsigned int irq)
 }
 
 /*
- * Need to define this so ppc_irq_dispatch_handler will NOT call
- * enable_IRQ at the end of interrupt handling. However, this does
- * nothing because there is not enough information provided to do
- * the EOI HvCall. This is done by XmPciLpEvent.c
+ * This does nothing because there is not enough information
+ * provided to do the EOI HvCall. This is done by XmPciLpEvent.c
  */
 static void iSeries_end_IRQ(unsigned int irq)
 {

@@ -567,6 +567,7 @@ nextnode:
 
 	xics_8259_pic.enable = i8259_pic.enable;
 	xics_8259_pic.disable = i8259_pic.disable;
+	xics_8259_pic.end = i8259_pic.end;
 	for (i = 0; i < 16; ++i)
 		get_irq_desc(i)->handler = &xics_8259_pic;
 	for (; i < NR_IRQS; ++i)

@@ -144,110 +144,6 @@ void fixup_irqs(cpumask_t map)
 }
 #endif
 
-extern int noirqdebug;
-
-/*
- * Eventually, this should take an array of interrupts and an array size
- * so it can dispatch multiple interrupts.
- */
-void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
-{
-	int status;
-	struct irqaction *action;
-	int cpu = smp_processor_id();
-	irq_desc_t *desc = get_irq_desc(irq);
-	irqreturn_t action_ret;
-
-	kstat_cpu(cpu).irqs[irq]++;
-
-	if (desc->status & IRQ_PER_CPU) {
-		/* no locking required for CPU-local interrupts: */
-		ack_irq(irq);
-		action_ret = handle_IRQ_event(irq, regs, desc->action);
-		desc->handler->end(irq);
-		return;
-	}
-
-	spin_lock(&desc->lock);
-	ack_irq(irq);
-	/*
-	   REPLAY is when Linux resends an IRQ that was dropped earlier
-	   WAITING is used by probe to mark irqs that are being tested
-	   */
-	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-	status |= IRQ_PENDING; /* we _want_ to handle it */
-
-	/*
-	 * If the IRQ is disabled for whatever reason, we cannot
-	 * use the action we have.
-	 */
-	action = NULL;
-	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
-		action = desc->action;
-		if (!action || !action->handler) {
-			ppc_spurious_interrupts++;
-			printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
-			/* We can't call disable_irq here, it would deadlock */
-			if (!desc->depth)
-				desc->depth = 1;
-			desc->status |= IRQ_DISABLED;
-			/* This is not a real spurrious interrupt, we
-			 * have to eoi it, so we jump to out
-			 */
-			mask_irq(irq);
-			goto out;
-		}
-		status &= ~IRQ_PENDING; /* we commit to handling */
-		status |= IRQ_INPROGRESS; /* we are handling it */
-	}
-	desc->status = status;
-
-	/*
-	 * If there is no IRQ handler or it was disabled, exit early.
-	   Since we set PENDING, if another processor is handling
-	   a different instance of this same irq, the other processor
-	   will take care of it.
-	 */
-	if (unlikely(!action))
-		goto out;
-
-	/*
-	 * Edge triggered interrupts need to remember
-	 * pending events.
-	 * This applies to any hw interrupts that allow a second
-	 * instance of the same irq to arrive while we are in do_IRQ
-	 * or in the handler. But the code here only handles the _second_
-	 * instance of the irq, not the third or fourth. So it is mostly
-	 * useful for irq hardware that does not mask cleanly in an
-	 * SMP environment.
-	 */
-	for (;;) {
-		spin_unlock(&desc->lock);
-
-		action_ret = handle_IRQ_event(irq, regs, action);
-
-		spin_lock(&desc->lock);
-		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret, regs);
-		if (likely(!(desc->status & IRQ_PENDING)))
-			break;
-		desc->status &= ~IRQ_PENDING;
-	}
-out:
-	desc->status &= ~IRQ_INPROGRESS;
-	/*
-	 * The ->end() handler has to deal with interrupts which got
-	 * disabled while the handler was running.
-	 */
-	if (desc->handler) {
-		if (desc->handler->end)
-			desc->handler->end(irq);
-		else if (desc->handler->enable)
-			desc->handler->enable(irq);
-	}
-	spin_unlock(&desc->lock);
-}
-
 #ifdef CONFIG_PPC_ISERIES
 void do_IRQ(struct pt_regs *regs)
 {

@@ -325,13 +221,13 @@ void do_IRQ(struct pt_regs *regs)
 		if (curtp != irqtp) {
 			irqtp->task = curtp->task;
 			irqtp->flags = 0;
-			call_ppc_irq_dispatch_handler(regs, irq, irqtp);
+			call___do_IRQ(irq, regs, irqtp);
 			irqtp->task = NULL;
 			if (irqtp->flags)
 				set_bits(irqtp->flags, &curtp->flags);
 		} else
 #endif
-			ppc_irq_dispatch_handler(regs, irq);
+			__do_IRQ(irq, regs);
 	} else
 		/* That's not SMP safe ... but who cares ? */
 		ppc_spurious_interrupts++;

@@ -78,12 +78,12 @@ _GLOBAL(call_do_softirq)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_ppc_irq_dispatch_handler)
+_GLOBAL(call___do_IRQ)
 	mflr	r0
 	std	r0,16(r1)
 	stdu	r1,THREAD_SIZE-112(r5)
 	mr	r1,r5
-	bl	.ppc_irq_dispatch_handler
+	bl	.__do_IRQ
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0

@@ -12,7 +12,6 @@
 #include <asm/processor.h>
 
 extern void timer_interrupt(struct pt_regs *);
-extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
 
 #ifdef CONFIG_PPC_ISERIES
 

@@ -488,7 +488,7 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
 
 extern void irq_ctx_init(void);
 extern void call_do_softirq(struct thread_info *tp);
-extern int call_ppc_irq_dispatch_handler(struct pt_regs *regs, int irq,
+extern int call___do_IRQ(int irq, struct pt_regs *regs,
 			struct thread_info *tp);
 
 #define __ARCH_HAS_DO_SOFTIRQ