[MIPS] Separate performance counter interrupts
Add support for a performance counter overflow interrupt that is delivered on a separate interrupt line from the timer interrupt.

Signed-off-by: Chris Dearman <chris@mips.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent b72c052622
commit ffe9ee4709
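Editor's note, for readers skimming the diff below: on MIPS32/64 R2 cores the CP0 IntCtl register reports which hardware interrupt line the performance counters raise (IPPCI, bits 28:26) and which line the count/compare timer uses (IPTI, bits 31:29). The patch registers a dedicated "performance" irqaction only when the perf counter line differs from the CPU counter/timer line; otherwise mipsxx_perfcount_irq stays -1 and the timer path keeps calling perf_irq() itself via the new handle_perf_irq() helper. The stand-alone sketch below mirrors that probing logic; the stub register value, the MIPSCPU_INT_CPUCTR value and the helper name are illustrative assumptions, not taken from the patch.

/*
 * Illustrative sketch only, not part of the patch: probing IntCtl the way
 * plat_perf_setup()/plat_timer_setup() do in the diff.  read_intctl_stub()
 * and the MIPSCPU_INT_CPUCTR value are assumptions made up for the example.
 */
#include <stdio.h>

#define MIPSCPU_INT_CPUCTR 7	/* assumed CPU count/compare (timer) line */

static unsigned int read_intctl_stub(void)
{
	/* pretend the core reports IPTI = 7 and IPPCI = 6 */
	return (7u << 29) | (6u << 26);
}

int main(void)
{
	unsigned int intctl = read_intctl_stub();
	int timer_hwint = (intctl >> 29) & 7;	/* IntCtl.IPTI  */
	int perf_hwint  = (intctl >> 26) & 7;	/* IntCtl.IPPCI */

	if (perf_hwint != MIPSCPU_INT_CPUCTR)
		printf("perf counters on their own hw int %d, timer on %d\n",
		       perf_hwint, timer_hwint);
	else
		printf("perf counter overflow shares hw int %d with the timer\n",
		       timer_hwint);
	return 0;
}

When the lines are shared (pre-R2 cores, or IPPCI equal to the timer line), the timer interrupt handler keeps invoking perf_irq() itself, which is exactly what handle_perf_irq() in the hunks below encapsulates.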
@@ -129,13 +129,13 @@ static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 
 static struct irqaction irq_resched = {
 	.handler = ipi_resched_interrupt,
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED|IRQF_PERCPU,
 	.name = "IPI_resched"
 };
 
 static struct irqaction irq_call = {
 	.handler = ipi_call_interrupt,
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED|IRQF_PERCPU,
 	.name = "IPI_call"
 };
 
@@ -275,10 +275,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
 	setup_irq(cpu_ipi_resched_irq, &irq_resched);
 	setup_irq(cpu_ipi_call_irq, &irq_call);
 
-	/* need to mark IPI's as IRQ_PER_CPU */
-	irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
 	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
-	irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
 	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
 }
 
@@ -326,8 +323,11 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
 
 void prom_init_secondary(void)
 {
+	/* Enable per-cpu interrupts */
+
+	/* This is Malta specific: IPI,performance and timer inetrrupts */
 	write_c0_status((read_c0_status() & ~ST0_IM ) |
-			(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7));
+			(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
 }
 
 void prom_smp_finish(void)
@@ -199,6 +199,30 @@ int (*perf_irq)(void) = null_perf_irq;
 EXPORT_SYMBOL(null_perf_irq);
 EXPORT_SYMBOL(perf_irq);
 
+/*
+ * Performance counter IRQ or -1 if shared with timer
+ */
+int mipsxx_perfcount_irq;
+EXPORT_SYMBOL(mipsxx_perfcount_irq);
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+static inline int handle_perf_irq (int r2)
+{
+	/*
+	 * The performance counter overflow interrupt may be shared with the
+	 * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a
+	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+	 * and we can't reliably determine if a counter interrupt has also
+	 * happened (!r2) then don't check for a timer interrupt.
+	 */
+	return (mipsxx_perfcount_irq < 0) &&
+		perf_irq() == IRQ_HANDLED &&
+		!r2;
+}
+
 asmlinkage void ll_timer_interrupt(int irq)
 {
 	int r2 = cpu_has_mips_r2;
@@ -206,19 +230,13 @@ asmlinkage void ll_timer_interrupt(int irq)
 	irq_enter();
 	kstat_this_cpu.irqs[irq]++;
 
-	/*
-	 * Suckage alert:
-	 * Before R2 of the architecture there was no way to see if a
-	 * performance counter interrupt was pending, so we have to run the
-	 * performance counter interrupt handler anyway.
-	 */
-	if (!r2 || (read_c0_cause() & (1 << 26)))
-		if (perf_irq())
-			goto out;
+	if (handle_perf_irq(r2))
+		goto out;
 
-	/* we keep interrupt disabled all the time */
-	if (!r2 || (read_c0_cause() & (1 << 30)))
-		timer_interrupt(irq, NULL);
+	if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
+		goto out;
+
+	timer_interrupt(irq, NULL);
 
 out:
 	irq_exit();
@@ -258,7 +276,7 @@ unsigned int mips_hpt_frequency;
 
 static struct irqaction timer_irqaction = {
 	.handler = timer_interrupt,
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED | IRQF_PERCPU,
 	.name = "timer",
 };
 
@@ -53,9 +53,8 @@
 
 unsigned long cpu_khz;
 
-#define CPUCTR_IMASKBIT (0x100 << MIPSCPU_INT_CPUCTR)
-
 static int mips_cpu_timer_irq;
+extern int mipsxx_perfcount_irq;
 extern void smtc_timer_broadcast(int);
 
 static void mips_timer_dispatch(void)
@@ -63,6 +62,11 @@ static void mips_timer_dispatch(void)
 	do_IRQ(mips_cpu_timer_irq);
 }
 
+static void mips_perf_dispatch(void)
+{
+	do_IRQ(mipsxx_perfcount_irq);
+}
+
 /*
  * Redeclare until I get around mopping the timer code insanity on MIPS.
  */
@@ -70,6 +74,24 @@ extern int null_perf_irq(void);
 
 extern int (*perf_irq)(void);
 
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+static inline int handle_perf_irq (int r2)
+{
+	/*
+	 * The performance counter overflow interrupt may be shared with the
+	 * timer interrupt (mipsxx_perfcount_irq < 0). If it is and a
+	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+	 * and we can't reliably determine if a counter interrupt has also
+	 * happened (!r2) then don't check for a timer interrupt.
+	 */
+	return (mipsxx_perfcount_irq < 0) &&
+		perf_irq() == IRQ_HANDLED &&
+		!r2;
+}
+
 irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
 {
 	int cpu = smp_processor_id();
@@ -92,8 +114,7 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
 	 * We could be here due to timer interrupt,
 	 * perf counter overflow, or both.
 	 */
-	if (read_c0_cause() & (1 << 26))
-		perf_irq();
+	(void) handle_perf_irq(1);
 
 	if (read_c0_cause() & (1 << 30)) {
 		/*
@@ -115,19 +136,19 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
 #else /* CONFIG_MIPS_MT_SMTC */
 	int r2 = cpu_has_mips_r2;
 
+	if (handle_perf_irq(r2))
+		goto out;
+
+	if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
+		goto out;
+
 	if (cpu == 0) {
 		/*
 		 * CPU 0 handles the global timer interrupt job and process
 		 * accounting resets count/compare registers to trigger next
 		 * timer int.
 		 */
-		if (!r2 || (read_c0_cause() & (1 << 26)))
-			if (perf_irq())
-				goto out;
-
-		/* we keep interrupt disabled all the time */
-		if (!r2 || (read_c0_cause() & (1 << 30)))
-			timer_interrupt(irq, NULL);
+		timer_interrupt(irq, NULL);
 	} else {
 		/* Everyone else needs to reset the timer int here as
 		   ll_local_timer_interrupt doesn't */
@@ -225,35 +246,82 @@ void __init mips_time_init(void)
 	mips_scroll_message();
 }
 
+irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
+{
+	return perf_irq();
+}
+
+static struct irqaction perf_irqaction = {
+	.handler = mips_perf_interrupt,
+	.flags = IRQF_DISABLED | IRQF_PERCPU,
+	.name = "performance",
+};
+
+void __init plat_perf_setup(struct irqaction *irq)
+{
+	int hwint = 0;
+	mipsxx_perfcount_irq = -1;
+
+#ifdef MSC01E_INT_BASE
+	if (cpu_has_veic) {
+		set_vi_handler (MSC01E_INT_PERFCTR, mips_perf_dispatch);
+		mipsxx_perfcount_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
+	} else
+#endif
+	if (cpu_has_mips_r2) {
+		/*
+		 * Read IntCtl.IPPCI to determine the performance
+		 * counter interrupt
+		 */
+		hwint = (read_c0_intctl () >> 26) & 7;
+		if (hwint != MIPSCPU_INT_CPUCTR) {
+			if (cpu_has_vint)
+				set_vi_handler (hwint, mips_perf_dispatch);
+			mipsxx_perfcount_irq = MIPSCPU_INT_BASE + hwint;
+		}
+	}
+	if (mipsxx_perfcount_irq >= 0) {
+#ifdef CONFIG_MIPS_MT_SMTC
+		setup_irq_smtc(mipsxx_perfcount_irq, irq, 0x100 << hwint);
+#else
+		setup_irq(mipsxx_perfcount_irq, irq);
+#endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_SMP
+		set_irq_handler(mipsxx_perfcount_irq, handle_percpu_irq);
+#endif
+	}
+}
+
 void __init plat_timer_setup(struct irqaction *irq)
 {
-#ifdef MSC01E_INT_BASE
+	int hwint = 0;
 	if (cpu_has_veic) {
 		set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch);
 		mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
-	} else
-#endif
-	{
-		if (cpu_has_vint)
-			set_vi_handler (MIPSCPU_INT_CPUCTR, mips_timer_dispatch);
-		mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR;
 	}
-
+	else {
+		if (cpu_has_mips_r2)
+			/*
+			 * Read IntCtl.IPTI to determine the timer interrupt
+			 */
+			hwint = (read_c0_intctl () >> 29) & 7;
+		else
+			hwint = MIPSCPU_INT_CPUCTR;
+		if (cpu_has_vint)
+			set_vi_handler (hwint, mips_timer_dispatch);
+		mips_cpu_timer_irq = MIPSCPU_INT_BASE + hwint;
+	}
+
 	/* we are using the cpu counter for timer interrupts */
 	irq->handler = mips_timer_interrupt;	/* we use our own handler */
 #ifdef CONFIG_MIPS_MT_SMTC
-	setup_irq_smtc(mips_cpu_timer_irq, irq, CPUCTR_IMASKBIT);
+	setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << hwint);
 #else
 	setup_irq(mips_cpu_timer_irq, irq);
 #endif /* CONFIG_MIPS_MT_SMTC */
 
 #ifdef CONFIG_SMP
 	/* irq_desc(riptor) is a global resource, when the interrupt overlaps
 	   on seperate cpu's the first one tries to handle the second interrupt.
 	   The effect is that the int remains disabled on the second cpu.
 	   Mark the interrupt with IRQ_PER_CPU to avoid any confusion */
 	irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
 	set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
 #endif
 
+	plat_perf_setup(&perf_irqaction);
 }
@@ -177,7 +177,10 @@ static int mipsxx_perfcount_handler(void)
 	unsigned int counters = op_model_mipsxx_ops.num_counters;
 	unsigned int control;
 	unsigned int counter;
-	int handled = 0;
+	int handled = IRQ_NONE;
 
+	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
+		return handled;
+
 	switch (counters) {
 #define HANDLE_COUNTER(n) \
@@ -188,7 +191,7 @@ static int mipsxx_perfcount_handler(void)
 	    (counter & M_COUNTER_OVERFLOW)) { \
 		oprofile_add_sample(get_irq_regs(), n); \
 		w_c0_perfcntr ## n(reg.counter[n]); \
-		handled = 1; \
+		handled = IRQ_HANDLED; \
 	}
 	HANDLE_COUNTER(3)
 	HANDLE_COUNTER(2)
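A note on the last two hunks: the oprofile handler now reports its result as IRQ_NONE/IRQ_HANDLED instead of 0/1, so the function installed as perf_irq can be consumed both by handle_perf_irq() (which tests for IRQ_HANDLED) and, on a dedicated line, returned directly from mips_perf_interrupt(). The stand-alone sketch below shows only that calling convention; the names and the pending flag are illustrative, not from the kernel, beyond IRQ_NONE being 0 and IRQ_HANDLED being 1.

/* Sketch only: the IRQ_NONE/IRQ_HANDLED convention the hunks switch to. */
#include <stdio.h>

enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 };

static int overflow_pending = 1;	/* pretend one counter has overflowed */

/* Stand-in for a perf_irq() implementation such as mipsxx_perfcount_handler(). */
static enum irqreturn perfcount_handler(void)
{
	if (!overflow_pending)
		return IRQ_NONE;	/* not ours: caller may still check the timer */
	overflow_pending = 0;		/* a real handler would rewrite the counter */
	return IRQ_HANDLED;
}

int main(void)
{
	printf("%d\n", perfcount_handler());	/* 1: counter serviced  */
	printf("%d\n", perfcount_handler());	/* 0: nothing pending   */
	return 0;
}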