perf, x86: Use helper function in x86_pmu_enable_all()

Use a helper function in x86_pmu_enable_all() to minimize accesses to
x86_pmu.eventsel in the fast path. The counter's MSR address is now
calculated from struct hw_perf_event. A later patch adds code that
calculates the MSR addresses with a table lookup, which shouldn't be
done in the fast path.
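
A minimal userspace sketch of the idea (illustrative names and
addresses, not the kernel code itself): the event carries its own MSR
base, so the enable fast path is pure arithmetic on per-event data
rather than a read of the global x86_pmu descriptor.

#include <stdio.h>

#define EVENTSEL_BASE 0x186ULL		/* stand-in event-select MSR base */
#define ENABLE_MASK   (1ULL << 22)	/* stand-in for ARCH_PERFMON_EVENTSEL_ENABLE */

struct hw_event {			/* stand-in for struct hw_perf_event */
	unsigned long long config_base;	/* cached once at event init */
	unsigned long long config;	/* event-select bits */
	int idx;			/* counter index */
};

/* stand-in for wrmsrl(): print what would be written where */
static void wrmsr_stub(unsigned long long addr, unsigned long long val)
{
	printf("wrmsrl(0x%llx, 0x%llx)\n", addr, val);
}

/* fast path: everything needed lives in the event itself */
static void enable_event(const struct hw_event *hwc, unsigned long long enable_mask)
{
	wrmsr_stub(hwc->config_base + hwc->idx, hwc->config | enable_mask);
}

int main(void)
{
	struct hw_event ev = { .config_base = EVENTSEL_BASE, .config = 0xc0, .idx = 1 };

	enable_event(&ev, ENABLE_MASK);
	return 0;
}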

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1296664860-10886-2-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit d45dd923fc
parent 023695d96e
Robert Richter, 2011-02-02 17:40:56 +01:00; committed by Ingo Molnar
1 changed file with 8 additions and 11 deletions


@@ -642,21 +642,24 @@ static void x86_pmu_disable(struct pmu *pmu)
 	x86_pmu.disable_all();
 }
 
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
+					  u64 enable_mask)
+{
+	wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
+}
+
 static void x86_pmu_enable_all(int added)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		struct perf_event *event = cpuc->events[idx];
-		u64 val;
+		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 
-		val = event->hw.config;
-		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
-		wrmsrl(x86_pmu.eventsel + idx, val);
+		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 	}
 }
 
@@ -915,12 +918,6 @@ static void x86_pmu_enable(struct pmu *pmu)
 	x86_pmu.enable_all(added);
 }
 
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
-					  u64 enable_mask)
-{
-	wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
-}
-
 static inline void x86_pmu_disable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
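
The message above anticipates a follow-up in which the MSR address
comes from a table lookup rather than base + index arithmetic. A rough
userspace sketch of that split, with hypothetical addresses and names:
the lookup runs once at event setup and caches the full address in the
event, so the enable/disable fast path never touches the table.

#include <stdio.h>

/* hypothetical non-contiguous event-select MSR addresses */
static const unsigned long long eventsel_addr[] = {
	0xc0010200ULL, 0xc0010202ULL, 0xc0010204ULL, 0xc0010206ULL,
};

struct hw_event {
	unsigned long long config_addr;	/* full MSR address, cached at setup */
	unsigned long long config;	/* event-select bits */
};

/* setup path, run once per event: the table lookup lives here */
static void setup_event(struct hw_event *hwc, int idx, unsigned long long config)
{
	hwc->config_addr = eventsel_addr[idx];
	hwc->config = config;
}

/* fast path: a plain load from the event, no table, no global state */
static void enable_event(const struct hw_event *hwc, unsigned long long enable_mask)
{
	printf("wrmsrl(0x%llx, 0x%llx)\n", hwc->config_addr, hwc->config | enable_mask);
}

int main(void)
{
	struct hw_event ev;

	setup_event(&ev, 2, 0xc0);
	enable_event(&ev, 1ULL << 22);
	return 0;
}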