perf: Collect the schedule-in rules in one function
This was scattered out; refactor it into a single function. No change in functionality.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110409192141.979862055@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit dce5855bba
parent db24d33e08
@@ -1476,6 +1476,18 @@ ctx_sched_in(struct perf_event_context *ctx,
                          enum event_type_t event_type,
                          struct task_struct *task);
 
+static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
+                                struct perf_event_context *ctx,
+                                struct task_struct *task)
+{
+        cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+        if (ctx)
+                ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
+        cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+        if (ctx)
+                ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+}
+
 /*
  * Cross CPU call to install and enable a performance event
  *
@@ -1523,12 +1535,7 @@ static int __perf_install_in_context(void *info)
        /*
         * Schedule everything back in
         */
-       cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
-       if (task_ctx)
-               ctx_sched_in(task_ctx, cpuctx, EVENT_PINNED, task);
-       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
-       if (task_ctx)
-               ctx_sched_in(task_ctx, cpuctx, EVENT_FLEXIBLE, task);
+       perf_event_sched_in(cpuctx, task_ctx, task);
 
        perf_pmu_enable(cpuctx->ctx.pmu);
        perf_ctx_unlock(cpuctx, task_ctx);
@@ -2107,9 +2114,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         */
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-       ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
-       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
-       ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+       perf_event_sched_in(cpuctx, ctx, task);
 
        cpuctx->task_ctx = ctx;
 
@@ -2347,9 +2352,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
        if (ctx)
                rotate_ctx(ctx);
 
-       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
-       if (ctx)
-               ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, current);
+       perf_event_sched_in(cpuctx, ctx, current);
 
 done:
        if (remove)