perf: Implement finer grained full dynticks kick
Currently the full dynticks subsystem keeps the tick alive as long as there are perf events running. This prevents the tick from being stopped as long as features such as the lockup detectors are running. As a temporary fix, the lockup detector is disabled by default when full dynticks is built, but this is not a long-term viable solution. To fix this, only keep the tick alive when an event configured with a frequency rather than a period is running on the CPU, or when an event throttles on the CPU. These are the only purposes of the perf tick, especially now that the rotation of flexible events is handled from a separate hrtimer. The tick can be shut down the rest of the time. Original-patch-by: Peter Zijlstra <peterz@infradead.org> Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Stephane Eranian <eranian@google.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1374539466-4799-8-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent ba8a75c16e
commit d84153d6c9
@@ -870,12 +870,8 @@ static void perf_pmu_rotate_start(struct pmu *pmu)
 
 	WARN_ON(!irqs_disabled());
 
-	if (list_empty(&cpuctx->rotation_list)) {
-		int was_empty = list_empty(head);
+	if (list_empty(&cpuctx->rotation_list))
 		list_add(&cpuctx->rotation_list, head);
-		if (was_empty)
-			tick_nohz_full_kick();
-	}
 }
 
 static void get_ctx(struct perf_event_context *ctx)
@@ -1875,6 +1871,9 @@ static int __perf_install_in_context(void *info)
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, task_ctx);
 
+	if (atomic_read(&__get_cpu_var(perf_freq_events)))
+		tick_nohz_full_kick();
+
 	return 0;
 }
 
@@ -2812,10 +2811,11 @@ done:
 #ifdef CONFIG_NO_HZ_FULL
 bool perf_event_can_stop_tick(void)
 {
-	if (list_empty(&__get_cpu_var(rotation_list)))
-		return true;
-	else
+	if (atomic_read(&__get_cpu_var(perf_freq_events)) ||
+	    __this_cpu_read(perf_throttled_count))
 		return false;
+	else
+		return true;
 }
 #endif
 
@@ -5202,6 +5202,7 @@ static int __perf_event_overflow(struct perf_event *event,
 		__this_cpu_inc(perf_throttled_count);
 		hwc->interrupts = MAX_INTERRUPTS;
 		perf_log_throttle(event, 0);
+		tick_nohz_full_kick();
 		ret = 1;
 	}
 }
|
Loading…
Reference in New Issue