perf: Fix race between event install and jump_labels
perf_install_in_context() relies upon the context switch hooks to have scheduled in events when the IPI misses its target -- after all, if the task has moved off the CPU (or wasn't running at all), it will have to context switch to run elsewhere, and that context switch will schedule in the events.

This, however, does not appear to be happening: it is possible for the IPI to miss (the task wasn't running), only to later observe the task running with an inactive context. The only possible explanation is that the context switch hooks were not called.

Therefore, put a sync_sched() after toggling the jump_label to guarantee that all CPUs will have the hooks enabled before we install an event.

A simple "if (0->1) sync_sched()" will not in fact work, because any further increment can race and complete before the sync_sched() finishes. Therefore we must jump through some hoops.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dvyukov@google.com
Cc: eranian@google.com
Cc: oleg@redhat.com
Cc: panand@redhat.com
Cc: sasha.levin@oracle.com
Cc: vince@deater.net
Link: http://lkml.kernel.org/r/20160224174947.980211985@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 9107c89e26
parent a69b0ca4ac
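The heart of the patch is the enable-side dance added to account_event(), which can be hard to read in diff form. The following is a minimal user-space sketch of that pattern, using C11 atomics and a pthread mutex in place of the kernel primitives. Every name in it (sched_count, sched_key, sched_mutex, wait_for_readers(), account()) is an illustrative stand-in rather than the kernel's API, and wait_for_readers() is only a stub for synchronize_sched().

/*
 * Minimal user-space analogue of the enable side added to account_event().
 * All names are illustrative; wait_for_readers() is only a placeholder
 * for synchronize_sched().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int sched_count;		/* plays the role of perf_sched_count  */
static atomic_bool sched_key;		/* plays the role of perf_sched_events */
static pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;

static void wait_for_readers(void)
{
	/* Stub standing in for synchronize_sched(): wait until every CPU
	 * has passed through a quiescent state and thus sees sched_key. */
}

static void account(void)
{
	int old = atomic_load(&sched_count);

	/* Fast path, like atomic_inc_not_zero(): the key is already enabled
	 * and a previous enabler has already done the wait for us. */
	while (old != 0) {
		if (atomic_compare_exchange_weak(&sched_count, &old, old + 1))
			return;
	}

	/* Slow path: serialize the 0 -> 1 transition. */
	pthread_mutex_lock(&sched_mutex);
	if (atomic_load(&sched_count) == 0) {
		atomic_store(&sched_key, true);	/* static_branch_enable() */
		wait_for_readers();		/* synchronize_sched()    */
	}
	/* Only now may further enablers bypass the mutex. */
	atomic_fetch_add(&sched_count, 1);
	pthread_mutex_unlock(&sched_mutex);
}

int main(void)
{
	account();	/* first caller: enables the key and waits          */
	account();	/* second caller: takes the lock-free fast path     */
	return 0;
}

The point of the hoops: only the very first enabler flips the key and waits, and nobody is allowed onto the mutex-free fast path until that wait has completed, so no later caller can install an event before all CPUs are running the scheduling hooks.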
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -906,7 +906,7 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
 	}
 }
 
-extern struct static_key_deferred perf_sched_events;
+extern struct static_key_false perf_sched_events;
 
 static __always_inline bool
 perf_sw_migrate_enabled(void)
@@ -925,7 +925,7 @@ static inline void perf_event_task_migrate(struct task_struct *task)
 static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
-	if (static_key_false(&perf_sched_events.key))
+	if (static_branch_unlikely(&perf_sched_events))
 		__perf_event_task_sched_in(prev, task);
 
 	if (perf_sw_migrate_enabled() && task->sched_migrated) {
@@ -942,7 +942,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
 {
 	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
 
-	if (static_key_false(&perf_sched_events.key))
+	if (static_branch_unlikely(&perf_sched_events))
 		__perf_event_task_sched_out(prev, next);
 }
 
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -321,7 +321,13 @@ enum event_type_t {
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
-struct static_key_deferred perf_sched_events __read_mostly;
+
+static void perf_sched_delayed(struct work_struct *work);
+DEFINE_STATIC_KEY_FALSE(perf_sched_events);
+static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
+static DEFINE_MUTEX(perf_sched_mutex);
+static atomic_t perf_sched_count;
+
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
 
@@ -3536,12 +3542,22 @@ static void unaccount_event(struct perf_event *event)
 	if (has_branch_stack(event))
 		dec = true;
 
-	if (dec)
-		static_key_slow_dec_deferred(&perf_sched_events);
+	if (dec) {
+		if (!atomic_add_unless(&perf_sched_count, -1, 1))
+			schedule_delayed_work(&perf_sched_work, HZ);
+	}
 
 	unaccount_event_cpu(event, event->cpu);
 }
 
+static void perf_sched_delayed(struct work_struct *work)
+{
+	mutex_lock(&perf_sched_mutex);
+	if (atomic_dec_and_test(&perf_sched_count))
+		static_branch_disable(&perf_sched_events);
+	mutex_unlock(&perf_sched_mutex);
+}
+
 /*
  * The following implement mutual exclusion of events on "exclusive" pmus
  * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
@@ -7780,8 +7796,28 @@ static void account_event(struct perf_event *event)
 	if (is_cgroup_event(event))
 		inc = true;
 
-	if (inc)
-		static_key_slow_inc(&perf_sched_events.key);
+	if (inc) {
+		if (atomic_inc_not_zero(&perf_sched_count))
+			goto enabled;
+
+		mutex_lock(&perf_sched_mutex);
+		if (!atomic_read(&perf_sched_count)) {
+			static_branch_enable(&perf_sched_events);
+			/*
+			 * Guarantee that all CPUs observe the key change and
+			 * call the perf scheduling hooks before proceeding to
+			 * install events that need them.
+			 */
+			synchronize_sched();
+		}
+		/*
+		 * Now that we have waited for the sync_sched(), allow further
+		 * increments to by-pass the mutex.
+		 */
+		atomic_inc(&perf_sched_count);
+		mutex_unlock(&perf_sched_mutex);
+	}
+enabled:
 
 	account_event_cpu(event, event->cpu);
 }
@@ -9344,9 +9380,6 @@ void __init perf_event_init(void)
 	ret = init_hw_breakpoint();
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 
-	/* do not patch jump label more than once per second */
-	jump_label_rate_limit(&perf_sched_events, HZ);
-
 	/*
 	 * Build time assertion that we keep the data_head at the intended
 	 * location. IOW, validation we got the __reserved[] size right.
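For completeness, here is a matching user-space sketch of the disable side that the patch open-codes in unaccount_event() and perf_sched_delayed(): every user except the last simply drops a count, and the final 1 -> 0 transition is pushed into deferred work that takes the mutex and switches the key back off, which effectively takes over the once-per-second rate limiting that the last hunk removes from perf_event_init(). As before, all names are illustrative stand-ins rather than the kernel's API, and queue_disable_work() simply runs the worker inline instead of scheduling delayed work.

/*
 * User-space sketch of the disable side (unaccount_event() plus
 * perf_sched_delayed()). Illustrative names only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int sched_count;		/* plays the role of perf_sched_count  */
static atomic_bool sched_key;		/* plays the role of perf_sched_events */
static pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;

static void disable_worker(void);

static void queue_disable_work(void)
{
	/* Stand-in for schedule_delayed_work(&perf_sched_work, HZ);
	 * run the worker inline here for simplicity. */
	disable_worker();
}

static void unaccount(void)
{
	int old = atomic_load(&sched_count);

	/* Like atomic_add_unless(&count, -1, 1): drop a reference unless
	 * we would be the one taking it to zero. */
	while (old != 1) {
		if (atomic_compare_exchange_weak(&sched_count, &old, old - 1))
			return;
	}

	/* Last user: defer the 1 -> 0 transition and the key disable. */
	queue_disable_work();
}

static void disable_worker(void)	/* like perf_sched_delayed() */
{
	pthread_mutex_lock(&sched_mutex);
	if (atomic_fetch_sub(&sched_count, 1) == 1)	/* atomic_dec_and_test() */
		atomic_store(&sched_key, false);	/* static_branch_disable() */
	pthread_mutex_unlock(&sched_mutex);
}

int main(void)
{
	atomic_store(&sched_key, true);
	atomic_store(&sched_count, 2);	/* pretend two events were accounted   */
	unaccount();			/* not last: just drops the count      */
	unaccount();			/* last user: key disabled via worker  */
	return 0;
}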