perf: Add helper function to return number of counters
The number of counters for the registered PMU is needed in a few
places, so provide a helper function that returns this number.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
Tested-by: Will Deacon <will.deacon@arm.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Robert Richter <robert.richter@amd.com>
parent 4cbe75be5c
commit 3bf101ba42
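For context, a minimal sketch of the call pattern this helper enables, mirroring the oprofile change below: a driver sizes its per-counter bookkeeping via perf_num_counters() instead of calling into arch code such as armpmu_get_max_events(). The my_counter_state type and my_driver_init function are hypothetical names used only for this illustration, not part of the patch.

	/*
	 * Illustrative sketch only -- not part of this commit.
	 * "my_counter_state" and "my_driver_init" are made-up names.
	 */
	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/perf_event.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct my_counter_state {
		u64 last_count;		/* example per-counter bookkeeping */
	};

	static struct my_counter_state *counters;
	static int num_counters;

	static int __init my_driver_init(void)
	{
		/* Arch-independent: no armpmu_get_max_events() etc. */
		num_counters = perf_num_counters();
		if (num_counters <= 0)
			return -ENODEV;	/* no PMU registered */

		counters = kcalloc(num_counters, sizeof(*counters),
				   GFP_KERNEL);
		if (!counters)
			return -ENOMEM;

		return 0;
	}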
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -123,6 +123,12 @@ armpmu_get_max_events(void)
 }
 EXPORT_SYMBOL_GPL(armpmu_get_max_events);
 
+int perf_num_counters(void)
+{
+	return armpmu_get_max_events();
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 #define HW_OP_UNSUPPORTED	0xFFFF
 
 #define C(_x) \
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -43,7 +43,7 @@ static DEFINE_MUTEX(op_arm_mutex);
 
 static struct op_counter_config *counter_config;
 static struct perf_event **perf_events[nr_cpumask_bits];
-static int perf_num_counters;
+static int num_counters;
 
 /*
  * Overflow callback for oprofile.
@@ -54,11 +54,11 @@ static void op_overflow_handler(struct perf_event *event, int unused,
 	int id;
 	u32 cpu = smp_processor_id();
 
-	for (id = 0; id < perf_num_counters; ++id)
+	for (id = 0; id < num_counters; ++id)
 		if (perf_events[cpu][id] == event)
 			break;
 
-	if (id != perf_num_counters)
+	if (id != num_counters)
 		oprofile_add_sample(regs, id);
 	else
 		pr_warning("oprofile: ignoring spurious overflow "
@@ -76,7 +76,7 @@ static void op_perf_setup(void)
 	u32 size = sizeof(struct perf_event_attr);
 	struct perf_event_attr *attr;
 
-	for (i = 0; i < perf_num_counters; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		attr = &counter_config[i].attr;
 		memset(attr, 0, size);
 		attr->type = PERF_TYPE_RAW;
@@ -131,7 +131,7 @@ static int op_perf_start(void)
 	int cpu, event, ret = 0;
 
 	for_each_online_cpu(cpu) {
-		for (event = 0; event < perf_num_counters; ++event) {
+		for (event = 0; event < num_counters; ++event) {
 			ret = op_create_counter(cpu, event);
 			if (ret)
 				goto out;
@@ -150,7 +150,7 @@ static void op_perf_stop(void)
 	int cpu, event;
 
 	for_each_online_cpu(cpu)
-		for (event = 0; event < perf_num_counters; ++event)
+		for (event = 0; event < num_counters; ++event)
 			op_destroy_counter(cpu, event);
 }
 
@@ -179,7 +179,7 @@ static int op_arm_create_files(struct super_block *sb, struct dentry *root)
 {
 	unsigned int i;
 
-	for (i = 0; i < perf_num_counters; i++) {
+	for (i = 0; i < num_counters; i++) {
 		struct dentry *dir;
 		char buf[4];
 
@@ -353,14 +353,19 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 
 	memset(&perf_events, 0, sizeof(perf_events));
 
-	perf_num_counters = armpmu_get_max_events();
+	num_counters = perf_num_counters();
+	if (num_counters <= 0) {
+		pr_info("oprofile: no performance counters\n");
+		ret = -ENODEV;
+		goto out;
+	}
 
-	counter_config = kcalloc(perf_num_counters,
+	counter_config = kcalloc(num_counters,
 			sizeof(struct op_counter_config), GFP_KERNEL);
 
 	if (!counter_config) {
 		pr_info("oprofile: failed to allocate %d "
-				"counters\n", perf_num_counters);
+				"counters\n", num_counters);
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -370,11 +375,11 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 		goto out;
 
 	for_each_possible_cpu(cpu) {
-		perf_events[cpu] = kcalloc(perf_num_counters,
+		perf_events[cpu] = kcalloc(num_counters,
 			sizeof(struct perf_event *), GFP_KERNEL);
 		if (!perf_events[cpu]) {
 			pr_info("oprofile: failed to allocate %d perf events "
-					"for cpu %d\n", perf_num_counters, cpu);
+					"for cpu %d\n", num_counters, cpu);
 			ret = -ENOMEM;
 			goto out;
 		}
@@ -409,7 +414,7 @@ void __exit oprofile_arch_exit(void)
 	struct perf_event *event;
 
 	for_each_possible_cpu(cpu) {
-		for (id = 0; id < perf_num_counters; ++id) {
+		for (id = 0; id < num_counters; ++id) {
 			event = perf_events[cpu][id];
 			if (event)
 				perf_event_release_kernel(event);
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -59,6 +59,15 @@ static inline int sh_pmu_initialized(void)
 	return !!sh_pmu;
 }
 
+int perf_num_counters(void)
+{
+	if (!sh_pmu)
+		return 0;
+
+	return sh_pmu->num_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 /*
  * Release the PMU if this is the last perf_event.
  */
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -849,6 +849,7 @@ extern int perf_max_events;
 
 extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
+extern int perf_num_counters(void);
 extern void perf_event_task_sched_in(struct task_struct *task);
 extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
 extern void perf_event_task_tick(struct task_struct *task);
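An architecture not covered by this patch would supply its own definition along the same lines as the SH version above: return the registered PMU's counter count, or 0 when no PMU has been probed. A minimal sketch, where my_pmu stands in for that architecture's PMU descriptor (a placeholder name, not a real kernel symbol):

	/* Sketch only: "my_pmu" is a placeholder for an arch PMU descriptor. */
	int perf_num_counters(void)
	{
		if (!my_pmu)			/* no PMU registered yet */
			return 0;

		return my_pmu->num_events;	/* counters this PMU exposes */
	}
	EXPORT_SYMBOL_GPL(perf_num_counters);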