perf_events: Simplify code by removing cpu argument to hw_perf_group_sched_in()
Since the cpu argument to hw_perf_group_sched_in() is always smp_processor_id(), simplify the code a little by removing this argument and using the current cpu where needed.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <1265890918.5396.3.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 6e37738a2f
parent 38331f62c2
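The change is mechanical: the callers of these scheduling paths already run on the CPU whose context is being scheduled in, so the callee can read the current CPU itself instead of having it threaded through every call. Below is a minimal, standalone userspace sketch of that pattern; the names are illustrative only, with sched_getcpu() standing in for the kernel's smp_processor_id(), and it is not the kernel code itself.

/*
 * Illustrative sketch of the refactoring in this commit, as a standalone
 * userspace program. struct event is a stand-in for struct perf_event.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

struct event {
	int oncpu;
};

/* Before: the current CPU is threaded through as an argument. */
static void event_sched_in_old(struct event *e, int cpu)
{
	e->oncpu = cpu;
}

/*
 * After: the callee reads the current CPU itself, since every caller
 * was passing the current CPU anyway.
 */
static void event_sched_in_new(struct event *e)
{
	e->oncpu = sched_getcpu();
}

int main(void)
{
	struct event e = { .oncpu = -1 };

	event_sched_in_old(&e, sched_getcpu());	/* old calling convention */
	event_sched_in_new(&e);			/* simplified convention */
	printf("oncpu = %d\n", e.oncpu);
	return 0;
}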
@@ -718,10 +718,10 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static void event_sched_in(struct perf_event *event, int cpu)
+static void event_sched_in(struct perf_event *event)
 {
 	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = cpu;
+	event->oncpu = smp_processor_id();
 	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
 	if (is_software_event(event))
 		event->pmu->enable(event);
@@ -735,7 +735,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
  */
 int hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu)
+	       struct perf_event_context *ctx)
 {
 	struct cpu_hw_events *cpuhw;
 	long i, n, n0;
@@ -766,10 +766,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
 		cpuhw->event[i]->hw.config = cpuhw->events[i];
 	cpuctx->active_oncpu += n;
 	n = 1;
-	event_sched_in(group_leader, cpu);
+	event_sched_in(group_leader);
 	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
 		if (sub->state != PERF_EVENT_STATE_OFF) {
-			event_sched_in(sub, cpu);
+			event_sched_in(sub);
 			++n;
 		}
 	}

@@ -980,10 +980,10 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static void event_sched_in(struct perf_event *event, int cpu)
+static void event_sched_in(struct perf_event *event)
 {
 	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = cpu;
+	event->oncpu = smp_processor_id();
 	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
 	if (is_software_event(event))
 		event->pmu->enable(event);
@@ -991,7 +991,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
 
 int hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu)
+	       struct perf_event_context *ctx)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct perf_event *sub;
@@ -1015,10 +1015,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
 
 	cpuctx->active_oncpu += n;
 	n = 1;
-	event_sched_in(group_leader, cpu);
+	event_sched_in(group_leader);
 	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
 		if (sub->state != PERF_EVENT_STATE_OFF) {
-			event_sched_in(sub, cpu);
+			event_sched_in(sub);
 			n++;
 		}
 	}

@@ -2403,12 +2403,12 @@ done:
 }
 
 static int x86_event_sched_in(struct perf_event *event,
-			  struct perf_cpu_context *cpuctx, int cpu)
+			  struct perf_cpu_context *cpuctx)
 {
 	int ret = 0;
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = cpu;
+	event->oncpu = smp_processor_id();
 	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
 
 	if (!is_x86_event(event))
@@ -2424,7 +2424,7 @@ static int x86_event_sched_in(struct perf_event *event,
 }
 
 static void x86_event_sched_out(struct perf_event *event,
-			    struct perf_cpu_context *cpuctx, int cpu)
+			    struct perf_cpu_context *cpuctx)
 {
 	event->state = PERF_EVENT_STATE_INACTIVE;
 	event->oncpu = -1;
@@ -2452,9 +2452,9 @@ static void x86_event_sched_out(struct perf_event *event,
  */
 int hw_perf_group_sched_in(struct perf_event *leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu)
+	       struct perf_event_context *ctx)
 {
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct perf_event *sub;
 	int assign[X86_PMC_IDX_MAX];
 	int n0, n1, ret;
@@ -2468,14 +2468,14 @@ int hw_perf_group_sched_in(struct perf_event *leader,
 	if (ret)
 		return ret;
 
-	ret = x86_event_sched_in(leader, cpuctx, cpu);
+	ret = x86_event_sched_in(leader, cpuctx);
 	if (ret)
 		return ret;
 
 	n1 = 1;
 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
 		if (sub->state > PERF_EVENT_STATE_OFF) {
-			ret = x86_event_sched_in(sub, cpuctx, cpu);
+			ret = x86_event_sched_in(sub, cpuctx);
 			if (ret)
 				goto undo;
 			++n1;
@@ -2500,11 +2500,11 @@ int hw_perf_group_sched_in(struct perf_event *leader,
 	 */
 	return 1;
 undo:
-	x86_event_sched_out(leader, cpuctx, cpu);
+	x86_event_sched_out(leader, cpuctx);
 	n0 = 1;
 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
 		if (sub->state == PERF_EVENT_STATE_ACTIVE) {
-			x86_event_sched_out(sub, cpuctx, cpu);
+			x86_event_sched_out(sub, cpuctx);
 			if (++n0 == n1)
 				break;
 		}

@@ -772,7 +772,7 @@ extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern int hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu);
+	       struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *

@@ -103,7 +103,7 @@ void __weak hw_perf_event_setup_offline(int cpu) { barrier(); }
 int __weak
 hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu)
+	       struct perf_event_context *ctx)
 {
 	return 0;
 }
@@ -633,14 +633,13 @@ void perf_event_disable(struct perf_event *event)
 static int
 event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
-		 struct perf_event_context *ctx,
-		 int cpu)
+		 struct perf_event_context *ctx)
 {
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
+	event->oncpu = smp_processor_id();
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
@@ -667,8 +666,7 @@ event_sched_in(struct perf_event *event,
 static int
 group_sched_in(struct perf_event *group_event,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx,
-	       int cpu)
+	       struct perf_event_context *ctx)
 {
 	struct perf_event *event, *partial_group;
 	int ret;
@@ -676,18 +674,18 @@ group_sched_in(struct perf_event *group_event,
 	if (group_event->state == PERF_EVENT_STATE_OFF)
 		return 0;
 
-	ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
+	ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
 	if (ret)
 		return ret < 0 ? ret : 0;
 
-	if (event_sched_in(group_event, cpuctx, ctx, cpu))
+	if (event_sched_in(group_event, cpuctx, ctx))
 		return -EAGAIN;
 
 	/*
 	 * Schedule in siblings as one group (if any):
 	 */
 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-		if (event_sched_in(event, cpuctx, ctx, cpu)) {
+		if (event_sched_in(event, cpuctx, ctx)) {
 			partial_group = event;
 			goto group_error;
 		}
@@ -761,7 +759,6 @@ static void __perf_install_in_context(void *info)
 	struct perf_event *event = info;
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_event *leader = event->group_leader;
-	int cpu = smp_processor_id();
 	int err;
 
 	/*
@@ -808,7 +805,7 @@ static void __perf_install_in_context(void *info)
 	if (!group_can_go_on(event, cpuctx, 1))
 		err = -EEXIST;
 	else
-		err = event_sched_in(event, cpuctx, ctx, cpu);
+		err = event_sched_in(event, cpuctx, ctx);
 
 	if (err) {
 		/*
@@ -950,11 +947,9 @@ static void __perf_event_enable(void *info)
 	} else {
 		perf_disable();
 		if (event == leader)
-			err = group_sched_in(event, cpuctx, ctx,
-					     smp_processor_id());
+			err = group_sched_in(event, cpuctx, ctx);
 		else
-			err = event_sched_in(event, cpuctx, ctx,
-					     smp_processor_id());
+			err = event_sched_in(event, cpuctx, ctx);
 		perf_enable();
 	}
 
@@ -1281,19 +1276,18 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 
 static void
 ctx_pinned_sched_in(struct perf_event_context *ctx,
-		    struct perf_cpu_context *cpuctx,
-		    int cpu)
+		    struct perf_cpu_context *cpuctx)
 {
 	struct perf_event *event;
 
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
-		if (event->cpu != -1 && event->cpu != cpu)
+		if (event->cpu != -1 && event->cpu != smp_processor_id())
 			continue;
 
 		if (group_can_go_on(event, cpuctx, 1))
-			group_sched_in(event, cpuctx, ctx, cpu);
+			group_sched_in(event, cpuctx, ctx);
 
 		/*
 		 * If this pinned group hasn't been scheduled,
@@ -1308,8 +1302,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
 
 static void
 ctx_flexible_sched_in(struct perf_event_context *ctx,
-		      struct perf_cpu_context *cpuctx,
-		      int cpu)
+		      struct perf_cpu_context *cpuctx)
 {
 	struct perf_event *event;
 	int can_add_hw = 1;
@@ -1322,11 +1315,11 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 		 * Listen to the 'cpu' scheduling filter constraint
 		 * of events:
 		 */
-		if (event->cpu != -1 && event->cpu != cpu)
+		if (event->cpu != -1 && event->cpu != smp_processor_id())
 			continue;
 
 		if (group_can_go_on(event, cpuctx, can_add_hw))
-			if (group_sched_in(event, cpuctx, ctx, cpu))
+			if (group_sched_in(event, cpuctx, ctx))
 				can_add_hw = 0;
 	}
 }
@@ -1336,8 +1329,6 @@ ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
 	     enum event_type_t event_type)
 {
-	int cpu = smp_processor_id();
-
 	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	if (likely(!ctx->nr_events))
@@ -1352,11 +1343,11 @@ ctx_sched_in(struct perf_event_context *ctx,
 	 * in order to give them the best chance of going on.
 	 */
 	if (event_type & EVENT_PINNED)
-		ctx_pinned_sched_in(ctx, cpuctx, cpu);
+		ctx_pinned_sched_in(ctx, cpuctx);
 
 	/* Then walk through the lower prio flexible groups */
 	if (event_type & EVENT_FLEXIBLE)
-		ctx_flexible_sched_in(ctx, cpuctx, cpu);
+		ctx_flexible_sched_in(ctx, cpuctx);
 
 	perf_enable();
  out: