perf/x86/intel/uncore: Store box in event->pmu_private
Store the box pointer in event->pmu_private, so we can get rid of the
per CPU data storage.

We keep it after converting to per package data, because a CPU to
package lookup will be 3 loads versus one, and these usage sites are in
the perf fast path.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Harish Chegondi <harish.chegondi@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20160222221011.460851335@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 1f2569fac6
parent 54d751d4ad
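The fast-path argument in the changelog ("3 loads versus one") can be
illustrated with a small stand-alone sketch. The types below are
user-space stand-ins with a plain array in place of the real per-CPU
storage, not the kernel's actual definitions: the old lookup has to
load event->pmu, convert it to the uncore PMU with container_of(), and
then dereference the per-CPU box slot, i.e. three dependent loads,
while the new lookup is a single load of event->pmu_private.

/*
 * Sketch of the two lookup paths, using simplified stand-in types
 * (the real definitions live in the uncore headers).
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct pmu { int dummy; };

struct intel_uncore_box { int cpu; };

struct intel_uncore_pmu {
        struct pmu pmu;
        struct intel_uncore_box **box;  /* stand-in for per-CPU storage */
};

struct perf_event {
        struct pmu *pmu;
        void *pmu_private;
};

/* Old path: three dependent loads (event->pmu, pmu->box, box[cpu]). */
static struct intel_uncore_box *old_event_to_box(struct perf_event *event, int cpu)
{
        struct intel_uncore_pmu *pmu =
                container_of(event->pmu, struct intel_uncore_pmu, pmu);

        return pmu->box[cpu];
}

/* New path: one load of the pointer cached at event init time. */
static struct intel_uncore_box *new_event_to_box(struct perf_event *event)
{
        return event->pmu_private;
}

int main(void)
{
        struct intel_uncore_box box = { .cpu = 0 };
        struct intel_uncore_box *percpu[1] = { &box };
        struct intel_uncore_pmu upmu = { .box = percpu };
        struct perf_event event = { .pmu = &upmu.pmu, .pmu_private = &box };

        printf("old: %p new: %p\n",
               (void *)old_event_to_box(&event, 0),
               (void *)new_event_to_box(&event));
        return 0;
}

The sketch only demonstrates the pointer-chasing difference; in the
patch itself the cached pointer is written once in the event-init paths
shown in the hunks below.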
@@ -92,11 +92,6 @@ ssize_t uncore_event_show(struct kobject *kobj,
 	return sprintf(buf, "%s", event->config);
 }
 
-struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
-{
-	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
-}
-
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
 	struct intel_uncore_box *box;
@@ -122,15 +117,6 @@ out:
 	return *per_cpu_ptr(pmu->box, cpu);
 }
 
-struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
-{
-	/*
-	 * perf core schedules event on the basis of cpu, uncore events are
-	 * collected by one of the cpus inside a physical package.
-	 */
-	return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
-}
-
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
 {
 	u64 count;
@@ -690,6 +676,7 @@ static int uncore_pmu_event_init(struct perf_event *event)
 	if (!box || box->cpu < 0)
 		return -EINVAL;
 	event->cpu = box->cpu;
+	event->pmu_private = box;
 
 	event->hw.idx = -1;
 	event->hw.last_tag = ~0ULL;
@@ -319,9 +319,17 @@ static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
 	return (box->phys_id < 0);
 }
 
-struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event);
+static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
+{
+	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
+}
+
+static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
+{
+	return event->pmu_private;
+}
+
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
-struct intel_uncore_box *uncore_event_to_box(struct perf_event *event);
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
@@ -313,6 +313,7 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
 		return -EINVAL;
 
 	event->cpu = box->cpu;
+	event->pmu_private = box;
 
 	event->hw.idx = -1;
 	event->hw.last_tag = ~0ULL;