perf: Round robin flexible groups of events using list_rotate_left()

This is more proper than doing it through a list_for_each_entry()
loop that breaks after the first entry.

v2: Don't rotate pinned groups as it's not needed to time share
them.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
commit e286417378
parent 5908cdc85e
Author: Frederic Weisbecker
Date:   2010-01-09 21:05:28 +01:00

 1 file changed, 5 insertions(+), 14 deletions(-)
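
To illustrate the round-robin behaviour this buys, here is a minimal
userspace sketch -- not kernel code; the node type and helpers below are
simplified stand-ins for the kernel's struct list_head API. Rotating the
list left once per tick moves the head group to the tail, so every
flexible group eventually gets first claim on the hardware counters:

#include <stdio.h>

struct node {
	struct node *prev, *next;
	char name;
};

/* Circular doubly-linked list with a head sentinel, list_head style. */
static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del_entry(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* Rotate left: the first entry becomes the last. */
static void list_rotate_left(struct node *head)
{
	if (head->next != head) {	/* non-empty */
		struct node *first = head->next;

		list_del_entry(first);
		list_add_tail(first, head);
	}
}

int main(void)
{
	struct node head, a = { .name = 'A' }, b = { .name = 'B' }, c = { .name = 'C' };
	struct node *pos;
	int tick;

	list_init(&head);
	list_add_tail(&a, &head);
	list_add_tail(&b, &head);
	list_add_tail(&c, &head);

	/* Three "ticks": each group takes its turn at the head. */
	for (tick = 0; tick < 3; tick++) {
		for (pos = head.next; pos != &head; pos = pos->next)
			printf("%c ", pos->name);
		putchar('\n');
		list_rotate_left(&head);
	}

	return 0;	/* prints: A B C / B C A / C A B */
}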

@@ -1454,25 +1454,16 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-	struct perf_event *event;
-
 	if (!ctx->nr_events)
 		return;
 
 	raw_spin_lock(&ctx->lock);
-	/*
-	 * Rotate the first entry last (works just fine for group events too):
-	 */
-	perf_disable();
-	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
-		list_move_tail(&event->group_entry, &ctx->pinned_groups);
-		break;
-	}
+
+	/* Rotate the first entry last of non-pinned groups */
+	perf_disable();
 
-	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
-		list_move_tail(&event->group_entry, &ctx->flexible_groups);
-		break;
-	}
+	list_rotate_left(&ctx->flexible_groups);
+
 	perf_enable();
 
 	raw_spin_unlock(&ctx->lock);
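
For reference, the helper relied on here moves the first entry to the
tail of the list -- exactly what each removed open-coded loop did, plus
an explicit empty-list check. A sketch close to its include/linux/list.h
definition from this era (consult the tree for the authoritative
version):

/**
 * list_rotate_left - rotate the list to the left
 * @head: the head of the list
 */
static inline void list_rotate_left(struct list_head *head)
{
	struct list_head *first;

	if (!list_empty(head)) {
		first = head->next;
		list_move_tail(first, head);
	}
}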