perf/core: fix userpage->time_enabled of inactive events

Users of rdpmc rely on the mmapped user page to calculate accurate
time_enabled. Currently, userpage->time_enabled is only updated when the
event is added to the PMU. As a result, an inactive event (due to counter
multiplexing) does not have an accurate userpage->time_enabled. This can
be reproduced with something like:

   /* open 20 task perf_event "cycles", to create multiplexing */

   fd = perf_event_open();  /* open task perf_event "cycles" */
   userpage = mmap(fd);     /* use mmap and rdpmc */

   while (true) {
     time_enabled_mmap = xxx; /* use logic in perf_event_mmap_page */
     time_enabled_read = read(fd).time_enabled;
     if (time_enabled_mmap > time_enabled_read)
         BUG();
   }
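
The `xxx` above stands for the seqlock-based read documented in the header
comment of struct perf_event_mmap_page (include/uapi/linux/perf_event.h).
For illustration only (not part of this patch), a minimal userspace sketch
of that logic: it assumes x86 (__rdtsc()), uses __sync_synchronize() as the
barrier, names the helper mmap_time_enabled() for this example, and leaves
out the rdpmc-based count scaling:

   #include <linux/perf_event.h>   /* uapi: struct perf_event_mmap_page */
   #include <stdint.h>
   #include <x86intrin.h>          /* __rdtsc(), x86 only */

   static uint64_t mmap_time_enabled(volatile struct perf_event_mmap_page *pc)
   {
       uint64_t enabled, running, cyc = 0, time_offset = 0;
       uint32_t seq, time_mult = 0, time_shift = 0;

       do {
           seq = pc->lock;
           __sync_synchronize();   /* pairs with the kernel's barrier */

           enabled = pc->time_enabled;
           running = pc->time_running;

           /* extrapolate time_enabled with the TSC, if permitted */
           if (pc->cap_user_time && enabled != running) {
               cyc         = __rdtsc();
               time_offset = pc->time_offset;
               time_mult   = pc->time_mult;
               time_shift  = pc->time_shift;
           }

           __sync_synchronize();
       } while (pc->lock != seq);    /* retry if the kernel updated it */

       if (pc->cap_user_time && enabled != running) {
           uint64_t quot  = cyc >> time_shift;
           uint64_t rem   = cyc & (((uint64_t)1 << time_shift) - 1);
           uint64_t delta = time_offset + quot * time_mult +
                            ((rem * time_mult) >> time_shift);

           enabled += delta;
       }

       return enabled;
   }

Before this fix, the value computed this way for an event that had been
multiplexed out could exceed the time_enabled returned by read(), because
the kernel only refreshed the userpage when the event was added to the
PMU; that is exactly the BUG() condition in the loop above.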

Fix this by updating userpage for inactive events in merge_sched_in.

Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reported-and-tested-by: Lucian Grijincu <lucian@fb.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210929194313.2398474-1-songliubraving@fb.com
Author:    Song Liu, 2021-09-29 12:43:13 -07:00 (committed by Peter Zijlstra)
parent     ecc2123e09
commit     f792565326
2 changed files with 33 additions and 5 deletions

--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -683,7 +683,9 @@ struct perf_event {
 	/*
 	 * timestamp shadows the actual context timing but it can
 	 * be safely used in NMI interrupt context. It reflects the
-	 * context time as it was when the event was last scheduled in.
+	 * context time as it was when the event was last scheduled in,
+	 * or when ctx_sched_in failed to schedule the event because we
+	 * run out of PMC.
 	 *
 	 * ctx_time already accounts for ctx->timestamp. Therefore to
 	 * compute ctx_time for a sample, simply add perf_clock().

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3707,6 +3707,29 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
 	return 0;
 }
 
+static inline bool event_update_userpage(struct perf_event *event)
+{
+	if (likely(!atomic_read(&event->mmap_count)))
+		return false;
+
+	perf_event_update_time(event);
+	perf_set_shadow_time(event, event->ctx);
+	perf_event_update_userpage(event);
+
+	return true;
+}
+
+static inline void group_update_userpage(struct perf_event *group_event)
+{
+	struct perf_event *event;
+
+	if (!event_update_userpage(group_event))
+		return;
+
+	for_each_sibling_event(event, group_event)
+		event_update_userpage(event);
+}
+
 static int merge_sched_in(struct perf_event *event, void *data)
 {
 	struct perf_event_context *ctx = event->ctx;
@@ -3725,14 +3748,15 @@ static int merge_sched_in(struct perf_event *event, void *data)
 	}
 
 	if (event->state == PERF_EVENT_STATE_INACTIVE) {
+		*can_add_hw = 0;
 		if (event->attr.pinned) {
 			perf_cgroup_event_disable(event, ctx);
 			perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+		} else {
+			ctx->rotate_necessary = 1;
+			perf_mux_hrtimer_restart(cpuctx);
+			group_update_userpage(event);
 		}
-
-		*can_add_hw = 0;
-		ctx->rotate_necessary = 1;
-		perf_mux_hrtimer_restart(cpuctx);
 	}
 
 	return 0;
@@ -6324,6 +6348,8 @@ accounting:
 
 		ring_buffer_attach(event, rb);
 
+		perf_event_update_time(event);
+		perf_set_shadow_time(event, event->ctx);
 		perf_event_init_userpage(event);
 		perf_event_update_userpage(event);
 	} else {