perf/x86/cqm: Factor out some common code
Having the same code twice (and once quite ugly) is fragile.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 27348f382b
parent e7ee3e8cb5
@@ -463,6 +463,14 @@ static bool is_mbm_event(int e)
 	return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
 }
 
+static void cqm_mask_call(struct rmid_read *rr)
+{
+	if (is_mbm_event(rr->evt_type))
+		on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count, rr, 1);
+	else
+		on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, rr, 1);
+}
+
 /*
  * Exchange the RMID of a group of events.
  */
@@ -479,18 +487,12 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
 	 */
 	if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
 		struct rmid_read rr = {
-			.value = ATOMIC64_INIT(0),
 			.rmid = old_rmid,
+			.evt_type = group->attr.config,
+			.value = ATOMIC64_INIT(0),
 		};
 
-		if (is_mbm_event(group->attr.config)) {
-			rr.evt_type = group->attr.config;
-			on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count,
-					 &rr, 1);
-		} else {
-			on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
-					 &rr, 1);
-		}
+		cqm_mask_call(&rr);
 		local64_set(&group->count, atomic64_read(&rr.value));
 	}
 
@@ -1180,6 +1182,7 @@ static u64 intel_cqm_event_count(struct perf_event *event)
 {
 	unsigned long flags;
 	struct rmid_read rr = {
+		.evt_type = event->attr.config,
 		.value = ATOMIC64_INIT(0),
 	};
 
@@ -1229,12 +1232,7 @@ static u64 intel_cqm_event_count(struct perf_event *event)
 	if (!__rmid_valid(rr.rmid))
 		goto out;
 
-	if (is_mbm_event(event->attr.config)) {
-		rr.evt_type = event->attr.config;
-		on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count, &rr, 1);
-	} else {
-		on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, &rr, 1);
-	}
+	cqm_mask_call(&rr);
 
 	raw_spin_lock_irqsave(&cache_lock, flags);
 	if (event->hw.cqm_rmid == rr.rmid)
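Editor's note: the diff above collapses the MBM-vs-CQM dispatch that previously appeared in both intel_cqm_xchg_rmid() and intel_cqm_event_count() into the single cqm_mask_call() helper. The following is a minimal, self-contained userspace sketch of that refactoring pattern only; struct fields, the event-ID range, and the two count functions are simplified stand-ins, not the kernel's actual definitions.

/*
 * Sketch of the pattern: callers fill in rr.evt_type once and call a
 * single dispatcher instead of repeating the if/else at every site.
 */
#include <stdio.h>

struct rmid_read {
	int evt_type;
	long value;
};

/* Stand-in for the QOS_MBM_TOTAL_EVENT_ID..QOS_MBM_LOCAL_EVENT_ID check. */
static int is_mbm_event(int e)
{
	return e >= 2 && e <= 3;
}

static void mbm_event_count(struct rmid_read *rr) { rr->value += 100; }
static void cqm_event_count(struct rmid_read *rr) { rr->value += 1; }

/* The factored-out dispatcher: one place decides which counter to read. */
static void mask_call(struct rmid_read *rr)
{
	if (is_mbm_event(rr->evt_type))
		mbm_event_count(rr);
	else
		cqm_event_count(rr);
}

int main(void)
{
	struct rmid_read rr = { .evt_type = 2, .value = 0 };

	mask_call(&rr);		/* callers no longer duplicate the if/else */
	printf("value = %ld\n", rr.value);
	return 0;
}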