perf_events, x86: Clean up hw_perf_*_all() implementation
Put the recursion avoidance code in the generic hook instead of
replicating it in each implementation.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <20100127221122.057507285@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 1a6e21f791
parent ed8777fc13
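In short: each vendor's disable_all()/enable_all() used to open with its own "already disabled/enabled?" check, flag update, and barrier(); after this patch those live once in the generic hw_perf_disable()/hw_perf_enable() hooks. A condensed sketch of the two hooks as they read after the patch, pieced together from the hunks below (the event-rescheduling body of hw_perf_enable() is elided):

void hw_perf_disable(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!x86_pmu_initialized())
                return;

        if (!cpuc->enabled)             /* recursion avoidance, now done once here */
                return;

        cpuc->n_added = 0;
        cpuc->enabled = 0;
        barrier();                      /* order the flag update before the callback */

        x86_pmu.disable_all();          /* p6_, intel_ or amd_pmu_disable_all() */
}

void hw_perf_enable(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!x86_pmu_initialized())
                return;

        if (cpuc->enabled)              /* already enabled, nothing to do */
                return;

        /* ... apply any pending event assignments (cpuc->n_added) ... */

        cpuc->enabled = 1;
        barrier();

        x86_pmu.enable_all();           /* p6_, intel_ or amd_pmu_enable_all() */
}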
@@ -1099,15 +1099,8 @@ static int __hw_perf_event_init(struct perf_event *event)
 
 static void p6_pmu_disable_all(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         u64 val;
 
-        if (!cpuc->enabled)
-                return;
-
-        cpuc->enabled = 0;
-        barrier();
-
         /* p6 only has one enable register */
         rdmsrl(MSR_P6_EVNTSEL0, val);
         val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1118,12 +1111,6 @@ static void intel_pmu_disable_all(void)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-        if (!cpuc->enabled)
-                return;
-
-        cpuc->enabled = 0;
-        barrier();
-
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
@@ -1135,17 +1122,6 @@ static void amd_pmu_disable_all(void)
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         int idx;
 
-        if (!cpuc->enabled)
-                return;
-
-        cpuc->enabled = 0;
-        /*
-         * ensure we write the disable before we start disabling the
-         * events proper, so that amd_pmu_enable_event() does the
-         * right thing.
-         */
-        barrier();
-
         for (idx = 0; idx < x86_pmu.num_events; idx++) {
                 u64 val;
 
@@ -1166,23 +1142,20 @@ void hw_perf_disable(void)
         if (!x86_pmu_initialized())
                 return;
 
-        if (cpuc->enabled)
-                cpuc->n_added = 0;
+        if (!cpuc->enabled)
+                return;
+
+        cpuc->n_added = 0;
+        cpuc->enabled = 0;
+        barrier();
 
         x86_pmu.disable_all();
 }
 
 static void p6_pmu_enable_all(void)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         unsigned long val;
 
-        if (cpuc->enabled)
-                return;
-
-        cpuc->enabled = 1;
-        barrier();
-
         /* p6 only has one enable register */
         rdmsrl(MSR_P6_EVNTSEL0, val);
         val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1193,12 +1166,6 @@ static void intel_pmu_enable_all(void)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-        if (cpuc->enabled)
-                return;
-
-        cpuc->enabled = 1;
-        barrier();
-
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
 
         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
@@ -1217,12 +1184,6 @@ static void amd_pmu_enable_all(void)
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         int idx;
 
-        if (cpuc->enabled)
-                return;
-
-        cpuc->enabled = 1;
-        barrier();
-
         for (idx = 0; idx < x86_pmu.num_events; idx++) {
                 struct perf_event *event = cpuc->events[idx];
                 u64 val;
@@ -1417,6 +1378,10 @@ void hw_perf_enable(void)
 
         if (!x86_pmu_initialized())
                 return;
+
+        if (cpuc->enabled)
+                return;
+
         if (cpuc->n_added) {
                 /*
                  * apply assignment obtained either from
@@ -1461,6 +1426,10 @@ void hw_perf_enable(void)
                 cpuc->n_added = 0;
                 perf_events_lapic_init();
         }
 
+        cpuc->enabled = 1;
+        barrier();
+
         x86_pmu.enable_all();
 }
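The disable_all/enable_all calls above dispatch through function pointers on the x86_pmu descriptor that each vendor driver fills in at init time; a minimal sketch of that hook mechanism (the surrounding fields and their order are assumed, not verbatim from this file):

struct x86_pmu {
        /* ... name, version, counter limits, etc. ... */
        void    (*disable_all)(void);   /* set to p6_/intel_/amd_pmu_disable_all */
        void    (*enable_all)(void);    /* set to p6_/intel_/amd_pmu_enable_all */
        /* ... */
};

One ordering detail worth noting: the comment deleted from amd_pmu_disable_all() explained that the enabled flag must be written before the counters are touched so that amd_pmu_enable_event() does the right thing; the barrier() in the generic hw_perf_disable() now provides that same guarantee for every implementation.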