perf_counter: Change pctrl() behaviour
Instead of enabling/disabling all counters acting on a particular task,
enable/disable all counters the task itself created.

[ v2: fix crash on first counter enable ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090523163012.916937244@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 082ff5a276
parent aa9c67f53d
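For context: perf_counter_task_enable() and perf_counter_task_disable() are
reached via prctl(). A minimal userspace sketch of that control path, assuming
the 2009-era PR_TASK_PERF_COUNTERS_{DISABLE,ENABLE} commands (the fallback
values below match the prctl.h of the time; double-check against your headers):

    /* Toggle the calling task's own perf counters via prctl(). */
    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_TASK_PERF_COUNTERS_DISABLE
    #define PR_TASK_PERF_COUNTERS_DISABLE 31
    #define PR_TASK_PERF_COUNTERS_ENABLE  32
    #endif

    int main(void)
    {
    	/* Stop counters this task created; after this commit, counters
    	 * that *other* tasks attached to us keep running. */
    	if (prctl(PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0))
    		perror("prctl(DISABLE)");

    	/* ... uninstrumented section ... */

    	if (prctl(PR_TASK_PERF_COUNTERS_ENABLE, 0, 0, 0, 0))
    		perror("prctl(ENABLE)");
    	return 0;
    }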
include/linux/init_task.h
@@ -108,6 +108,15 @@ extern struct group_info init_groups;
 
 extern struct cred init_cred;
 
+#ifdef CONFIG_PERF_COUNTERS
+# define INIT_PERF_COUNTERS(tsk)					\
+	.perf_counter_mutex =						\
+		 __MUTEX_INITIALIZER(tsk.perf_counter_mutex),		\
+	.perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list),
+#else
+# define INIT_PERF_COUNTERS(tsk)
+#endif
+
 /*
  * INIT_TASK is used to set up the first task table, touch at
  * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -171,6 +180,7 @@ extern struct cred init_cred;
 	}, \
 	.dirties = INIT_PROP_LOCAL_SINGLE(dirties),			\
 	INIT_IDS							\
+	INIT_PERF_COUNTERS(tsk)						\
 	INIT_TRACE_IRQFLAGS						\
 	INIT_LOCKDEP							\
 	INIT_FTRACE_GRAPH						\
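INIT_PERF_COUNTERS follows the usual INIT_TASK convention: it expands to
designated initializers that are spliced into the static init_task definition,
so the mutex and list head are valid before any code runs. A compilable
userspace sketch of the same pattern (the demo_* names are made up; the kernel
uses __MUTEX_INITIALIZER and LIST_HEAD_INIT):

    #include <pthread.h>
    #include <stdio.h>

    struct demo_list_head { struct demo_list_head *next, *prev; };
    #define DEMO_LIST_HEAD_INIT(name) { &(name), &(name) }

    struct demo_task {
    	const char		*comm;
    	pthread_mutex_t		counter_mutex;
    	struct demo_list_head	counter_list;
    };

    /* Expands to initializers for the counter fields, like INIT_PERF_COUNTERS. */
    #define DEMO_INIT_COUNTERS(tsk)					\
    	.counter_mutex = PTHREAD_MUTEX_INITIALIZER,			\
    	.counter_list  = DEMO_LIST_HEAD_INIT(tsk.counter_list),

    static struct demo_task init_task = {
    	.comm = "demo",
    	DEMO_INIT_COUNTERS(init_task)
    };

    int main(void)
    {
    	/* The list head points at itself: an empty list, set up statically. */
    	printf("empty: %d\n",
    	       init_task.counter_list.next == &init_task.counter_list);
    	return 0;
    }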
include/linux/perf_counter.h
@@ -469,6 +469,9 @@ struct perf_counter {
 	int				oncpu;
 	int				cpu;
 
+	struct list_head		owner_entry;
+	struct task_struct		*owner;
+
 	/* mmap bits */
 	struct mutex			mmap_mutex;
 	atomic_t			mmap_count;
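owner_entry is an intrusive list node: the linkage is embedded in the counter
itself, and the counter is threaded onto its creator's perf_counter_list, with
owner pointing back at the creating task. A self-contained sketch of the idiom,
with simplified stand-ins for the <linux/list.h> helpers:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
    	n->prev = h->prev; n->next = h;
    	h->prev->next = n; h->prev = n;
    }

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct counter {
    	int			id;
    	struct list_head	owner_entry;	/* linkage lives in the element */
    };

    int main(void)
    {
    	struct list_head owner_list = { &owner_list, &owner_list };
    	struct counter a = { .id = 1 }, b = { .id = 2 };

    	list_add_tail(&a.owner_entry, &owner_list);
    	list_add_tail(&b.owner_entry, &owner_list);

    	/* Walk the list the way list_for_each_entry() does. */
    	for (struct list_head *p = owner_list.next; p != &owner_list; p = p->next)
    		printf("counter %d\n",
    		       container_of(p, struct counter, owner_entry)->id);
    	return 0;
    }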
include/linux/sched.h
@@ -1389,6 +1389,8 @@ struct task_struct {
 #endif
 #ifdef CONFIG_PERF_COUNTERS
 	struct perf_counter_context *perf_counter_ctxp;
+	struct mutex perf_counter_mutex;
+	struct list_head perf_counter_list;
 #endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *mempolicy;
kernel/perf_counter.c
@@ -1076,79 +1076,26 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
 	__perf_counter_sched_in(ctx, cpuctx, cpu);
 }
 
-int perf_counter_task_disable(void)
+int perf_counter_task_enable(void)
 {
-	struct task_struct *curr = current;
-	struct perf_counter_context *ctx = curr->perf_counter_ctxp;
 	struct perf_counter *counter;
-	unsigned long flags;
-
-	if (!ctx || !ctx->nr_counters)
-		return 0;
-
-	local_irq_save(flags);
-
-	__perf_counter_task_sched_out(ctx);
-
-	spin_lock(&ctx->lock);
-
-	/*
-	 * Disable all the counters:
-	 */
-	perf_disable();
-
-	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-		if (counter->state != PERF_COUNTER_STATE_ERROR) {
-			update_group_times(counter);
-			counter->state = PERF_COUNTER_STATE_OFF;
-		}
-	}
-
-	perf_enable();
-
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	mutex_lock(&current->perf_counter_mutex);
+	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+		perf_counter_enable(counter);
+	mutex_unlock(&current->perf_counter_mutex);
 
 	return 0;
 }
 
-int perf_counter_task_enable(void)
+int perf_counter_task_disable(void)
 {
-	struct task_struct *curr = current;
-	struct perf_counter_context *ctx = curr->perf_counter_ctxp;
 	struct perf_counter *counter;
-	unsigned long flags;
-	int cpu;
-
-	if (!ctx || !ctx->nr_counters)
-		return 0;
-
-	local_irq_save(flags);
-	cpu = smp_processor_id();
-
-	__perf_counter_task_sched_out(ctx);
-
-	spin_lock(&ctx->lock);
-
-	/*
-	 * Disable all the counters:
-	 */
-	perf_disable();
-
-	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-		if (counter->state > PERF_COUNTER_STATE_OFF)
-			continue;
-		counter->state = PERF_COUNTER_STATE_INACTIVE;
-		counter->tstamp_enabled =
-			ctx->time - counter->total_time_enabled;
-		counter->hw_event.disabled = 0;
-	}
-	perf_enable();
-
-	spin_unlock(&ctx->lock);
-
-	perf_counter_task_sched_in(curr, cpu);
-
-	local_irq_restore(flags);
+	mutex_lock(&current->perf_counter_mutex);
+	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+		perf_counter_disable(counter);
+	mutex_unlock(&current->perf_counter_mutex);
 
 	return 0;
 }
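Note the semantic shift in the hunk above: the old implementations walked
ctx->counter_list, i.e. every counter in the task's context, including counters
other processes had attached to the task, and flipped counter->state by hand
under ctx->lock. The new implementations walk only current->perf_counter_list,
the list of counters this task created, and defer the actual state change to
perf_counter_enable()/perf_counter_disable(), which handle scheduling the
counter in or out.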
@@ -1416,6 +1363,11 @@ static int perf_release(struct inode *inode, struct file *file)
 	perf_counter_remove_from_context(counter);
 	mutex_unlock(&ctx->mutex);
 
+	mutex_lock(&counter->owner->perf_counter_mutex);
+	list_del_init(&counter->owner_entry);
+	mutex_unlock(&counter->owner->perf_counter_mutex);
+	put_task_struct(counter->owner);
+
 	free_counter(counter);
 	put_context(ctx);
@@ -3272,6 +3224,12 @@ SYSCALL_DEFINE5(perf_counter_open,
 	perf_install_in_context(ctx, counter, cpu);
 	mutex_unlock(&ctx->mutex);
 
+	counter->owner = current;
+	get_task_struct(current);
+	mutex_lock(&current->perf_counter_mutex);
+	list_add_tail(&counter->owner_entry, &current->perf_counter_list);
+	mutex_unlock(&current->perf_counter_mutex);
+
 	fput_light(counter_file, fput_needed2);
 
 out_fput:
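The two hunks above form a get/put pair: perf_counter_open() pins the owning
task with get_task_struct() so that counter->owner stays valid until
perf_release() unlinks the counter and drops the reference. A minimal userspace
stand-in for that lifetime rule (names are illustrative; the refcount is a
plain integer rather than the kernel's task usage counter):

    #include <assert.h>
    #include <stdio.h>

    struct task { int refcount; };

    static void get_task(struct task *t) { t->refcount++; }
    static void put_task(struct task *t) { assert(t->refcount-- > 0); }

    struct counter { struct task *owner; };

    static void counter_open(struct counter *c, struct task *owner)
    {
    	c->owner = owner;
    	get_task(owner);	/* pin the owner, as perf_counter_open() does */
    }

    static void counter_release(struct counter *c)
    {
    	put_task(c->owner);	/* paired drop, as perf_release() does */
    	c->owner = NULL;
    }

    int main(void)
    {
    	struct task t = { .refcount = 1 };
    	struct counter c;

    	counter_open(&c, &t);
    	printf("refs after open: %d\n", t.refcount);	/* 2 */
    	counter_release(&c);
    	printf("refs after release: %d\n", t.refcount);	/* 1 */
    	return 0;
    }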
@@ -3488,6 +3446,9 @@ void perf_counter_init_task(struct task_struct *child)
 
 	child->perf_counter_ctxp = NULL;
 
+	mutex_init(&child->perf_counter_mutex);
+	INIT_LIST_HEAD(&child->perf_counter_list);
+
 	/*
 	 * This is executed from the parent task context, so inherit
 	 * counters that have been marked for cloning.