perf/x86/uncore: Fix event group support
The events in the same group don't start or stop simultaneously.
Here is the ftrace when enabling event group for uncore_iio_0:
# perf stat -e "{uncore_iio_0/event=0x1/,uncore_iio_0/event=0xe/}"
<idle>-0 [000] d.h. 8959.064832: read_msr: a41, value
b2b0b030 //Read counter reg of IIO unit0 counter0
<idle>-0 [000] d.h. 8959.064835: write_msr: a48, value
400001 //Write Ctrl reg of IIO unit0 counter0 to enable
counter0. <------ Although counter0 is enabled, Unit Ctrl is still
frozen. Nothing will count. We are still good here.
<idle>-0 [000] d.h. 8959.064836: read_msr: a40, value
30100 //Read Unit Ctrl reg of IIO unit0
<idle>-0 [000] d.h. 8959.064838: write_msr: a40, value
30000 //Write Unit Ctrl reg of IIO unit0 to enable all
counters in the unit by clearing the Freeze bit <------ Unit0 is un-frozen.
Counter0 has been enabled. Now it starts counting. But counter1 has not
been enabled yet. The issue starts here.
<idle>-0 [000] d.h. 8959.064846: read_msr: a42, value 0
//Read counter reg of IIO unit0 counter1
<idle>-0 [000] d.h. 8959.064847: write_msr: a49, value
40000e //Write Ctrl reg of IIO unit0 counter1 to enable
counter1. <------ Now, counter1 just starts to count. Counter0 has
been running for a while.
The current code un-freezes the Unit Ctrl right after the first counter is
enabled. The subsequent group events always lose some counter values.
Implement pmu_enable and pmu_disable support for uncore, which can help
to batch hardware accesses.
No one uses uncore_enable_box and uncore_disable_box. Remove them.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-drivers-review@eclists.intel.com
Cc: linux-perf@eclists.intel.com
Fixes: 087bfbb032 ("perf/x86: Add generic Intel uncore PMU support")
Link: https://lkml.kernel.org/r/1572014593-31591-1-git-send-email-kan.liang@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
e431e79b60
commit
75be6f703a
|
@ -502,10 +502,8 @@ void uncore_pmu_event_start(struct perf_event *event, int flags)
|
||||||
local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
|
local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
|
||||||
uncore_enable_event(box, event);
|
uncore_enable_event(box, event);
|
||||||
|
|
||||||
if (box->n_active == 1) {
|
if (box->n_active == 1)
|
||||||
uncore_enable_box(box);
|
|
||||||
uncore_pmu_start_hrtimer(box);
|
uncore_pmu_start_hrtimer(box);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void uncore_pmu_event_stop(struct perf_event *event, int flags)
|
void uncore_pmu_event_stop(struct perf_event *event, int flags)
|
||||||
|
@ -529,10 +527,8 @@ void uncore_pmu_event_stop(struct perf_event *event, int flags)
|
||||||
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
|
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
|
||||||
hwc->state |= PERF_HES_STOPPED;
|
hwc->state |= PERF_HES_STOPPED;
|
||||||
|
|
||||||
if (box->n_active == 0) {
|
if (box->n_active == 0)
|
||||||
uncore_disable_box(box);
|
|
||||||
uncore_pmu_cancel_hrtimer(box);
|
uncore_pmu_cancel_hrtimer(box);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
|
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
|
||||||
|
@ -778,6 +774,40 @@ static int uncore_pmu_event_init(struct perf_event *event)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void uncore_pmu_enable(struct pmu *pmu)
|
||||||
|
{
|
||||||
|
struct intel_uncore_pmu *uncore_pmu;
|
||||||
|
struct intel_uncore_box *box;
|
||||||
|
|
||||||
|
uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
|
||||||
|
if (!uncore_pmu)
|
||||||
|
return;
|
||||||
|
|
||||||
|
box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
|
||||||
|
if (!box)
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (uncore_pmu->type->ops->enable_box)
|
||||||
|
uncore_pmu->type->ops->enable_box(box);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void uncore_pmu_disable(struct pmu *pmu)
|
||||||
|
{
|
||||||
|
struct intel_uncore_pmu *uncore_pmu;
|
||||||
|
struct intel_uncore_box *box;
|
||||||
|
|
||||||
|
uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
|
||||||
|
if (!uncore_pmu)
|
||||||
|
return;
|
||||||
|
|
||||||
|
box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
|
||||||
|
if (!box)
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (uncore_pmu->type->ops->disable_box)
|
||||||
|
uncore_pmu->type->ops->disable_box(box);
|
||||||
|
}
|
||||||
|
|
||||||
static ssize_t uncore_get_attr_cpumask(struct device *dev,
|
static ssize_t uncore_get_attr_cpumask(struct device *dev,
|
||||||
struct device_attribute *attr, char *buf)
|
struct device_attribute *attr, char *buf)
|
||||||
{
|
{
|
||||||
|
@ -803,6 +833,8 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
|
||||||
pmu->pmu = (struct pmu) {
|
pmu->pmu = (struct pmu) {
|
||||||
.attr_groups = pmu->type->attr_groups,
|
.attr_groups = pmu->type->attr_groups,
|
||||||
.task_ctx_nr = perf_invalid_context,
|
.task_ctx_nr = perf_invalid_context,
|
||||||
|
.pmu_enable = uncore_pmu_enable,
|
||||||
|
.pmu_disable = uncore_pmu_disable,
|
||||||
.event_init = uncore_pmu_event_init,
|
.event_init = uncore_pmu_event_init,
|
||||||
.add = uncore_pmu_event_add,
|
.add = uncore_pmu_event_add,
|
||||||
.del = uncore_pmu_event_del,
|
.del = uncore_pmu_event_del,
|
||||||
|
|
|
@ -441,18 +441,6 @@ static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void uncore_disable_box(struct intel_uncore_box *box)
|
|
||||||
{
|
|
||||||
if (box->pmu->type->ops->disable_box)
|
|
||||||
box->pmu->type->ops->disable_box(box);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void uncore_enable_box(struct intel_uncore_box *box)
|
|
||||||
{
|
|
||||||
if (box->pmu->type->ops->enable_box)
|
|
||||||
box->pmu->type->ops->enable_box(box);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void uncore_disable_event(struct intel_uncore_box *box,
|
static inline void uncore_disable_event(struct intel_uncore_box *box,
|
||||||
struct perf_event *event)
|
struct perf_event *event)
|
||||||
{
|
{
|
||||||
|
|
Loading…
Reference in New Issue