perf_counter: powerpc: add nmi_enter/nmi_exit calls
Impact: fix potential deadlocks on powerpc
Now that the core is using in_nmi() (added in e30e08f6, "perf_counter:
fix NMI race in task clock"), we need the powerpc perf_counter_interrupt
to call nmi_enter() and nmi_exit() in those cases where the interrupt
happens when interrupts are soft-disabled.
If interrupts were soft-enabled, we can treat it as a regular interrupt
and do irq_enter/irq_exit around the whole routine. This lets us get rid
of the test_perf_counter_pending() call at the end of
perf_counter_interrupt, thus simplifying things a little.
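For orientation, here is a condensed sketch (not the literal patched function; the counter-scanning loop and the MMCR0 write are elided) of the control flow perf_counter_interrupt() ends up with after this change:

static void perf_counter_interrupt(struct pt_regs *regs)
{
	int nmi;

	/*
	 * Soft-disabled interrupts mean this PMU interrupt must be
	 * treated as an NMI; otherwise it is a regular interrupt.
	 */
	nmi = !regs->softe;
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	/*
	 * ... scan cpuhw->counter[] and call
	 * record_and_restart(counter, val, regs, nmi) for each counter
	 * that has overflowed, then restore MMCR0 ...
	 */

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}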
Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <18909.31952.873098.336615@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit ca8f2d7f01
parent 6c0b324435
@@ -714,7 +714,7 @@ hw_perf_counter_init(struct perf_counter *counter)
  * here so there is no possibility of being interrupted.
  */
 static void record_and_restart(struct perf_counter *counter, long val,
-			       struct pt_regs *regs)
+			       struct pt_regs *regs, int nmi)
 {
 	s64 prev, delta, left;
 	int record = 0;
@@ -749,7 +749,7 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 * Finally record data if requested.
 	 */
 	if (record)
-		perf_counter_overflow(counter, 1, regs, 0);
+		perf_counter_overflow(counter, nmi, regs, 0);
 }
 
 /*
@@ -762,6 +762,17 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	struct perf_counter *counter;
 	long val;
 	int found = 0;
+	int nmi;
+
+	/*
+	 * If interrupts were soft-disabled when this PMU interrupt
+	 * occurred, treat it as an NMI.
+	 */
+	nmi = !regs->softe;
+	if (nmi)
+		nmi_enter();
+	else
+		irq_enter();
 
 	for (i = 0; i < cpuhw->n_counters; ++i) {
 		counter = cpuhw->counter[i];
@@ -769,7 +780,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 		if ((int)val < 0) {
 			/* counter has overflowed */
 			found = 1;
-			record_and_restart(counter, val, regs);
+			record_and_restart(counter, val, regs, nmi);
 		}
 	}
 
@@ -796,19 +807,11 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 	 */
 	mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
 
-	/*
-	 * If we need a wakeup, check whether interrupts were soft-enabled
-	 * when we took the interrupt. If they were, we can wake stuff up
-	 * immediately; otherwise we'll have do the wakeup when interrupts
-	 * get soft-enabled.
-	 */
-	if (test_perf_counter_pending() && regs->softe) {
-		irq_enter();
-		clear_perf_counter_pending();
-		perf_counter_do_pending();
+	if (nmi)
+		nmi_exit();
+	else
 		irq_exit();
-	}
 }
 
 void hw_perf_counter_setup(int cpu)
 {