arm_pmu: Change API to support 64bit counter values
Convert the {read/write}_counter APIs to handle 64bit values, to enable
support for chained event counters. The backends still operate on 32bit
values, and we pass them 32bit values only, so in effect there is no
functional change.

Cc: Will Deacon <will.deacon@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 8d3e994241
commit 3a95200d3f
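For context: the point of widening the prototypes is that a pair of adjacent 32bit hardware counters can later be chained and reported to the perf core as a single 64bit count. Below is a minimal standalone sketch of that folding; it is purely illustrative and not part of this commit (the evcntr[] array and chained_read() are made-up stand-ins for real PMU counter registers and accessors).

#include <stdint.h>
#include <stdio.h>

/* Stand-in for per-counter hardware registers, indexed like hwc->idx. */
static uint32_t evcntr[8];

/*
 * With a u64 read_counter callback, a backend could fold a chained
 * pair (idx holds the low word, idx + 1 the high word) into a single
 * 64bit value instead of truncating the count to 32 bits.
 */
static uint64_t chained_read(int idx)
{
	return ((uint64_t)evcntr[idx + 1] << 32) | evcntr[idx];
}

int main(void)
{
	evcntr[0] = 0xdeadbeef;	/* low 32 bits */
	evcntr[1] = 0x2;	/* high word: the low counter has wrapped twice */
	printf("chained count: 0x%llx\n",
	       (unsigned long long)chained_read(0));
	return 0;
}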
arch/arm/kernel/perf_event_v6.c

@@ -233,7 +233,7 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
 	return ret;
 }
 
-static inline u32 armv6pmu_read_counter(struct perf_event *event)
+static inline u64 armv6pmu_read_counter(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -251,7 +251,7 @@ static inline u32 armv6pmu_read_counter(struct perf_event *event)
 	return value;
 }
 
-static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
+static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
arch/arm/kernel/perf_event_v7.c

@@ -743,7 +743,7 @@ static inline void armv7_pmnc_select_counter(int idx)
 	isb();
 }
 
-static inline u32 armv7pmu_read_counter(struct perf_event *event)
+static inline u64 armv7pmu_read_counter(struct perf_event *event)
 {
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
@@ -763,7 +763,7 @@ static inline u32 armv7pmu_read_counter(struct perf_event *event)
 	return value;
 }
 
-static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
+static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
 {
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
arch/arm/kernel/perf_event_xscale.c

@@ -316,7 +316,7 @@ static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static inline u32 xscale1pmu_read_counter(struct perf_event *event)
+static inline u64 xscale1pmu_read_counter(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -337,7 +337,7 @@ static inline u32 xscale1pmu_read_counter(struct perf_event *event)
 	return val;
 }
 
-static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
+static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -678,7 +678,7 @@ static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static inline u32 xscale2pmu_read_counter(struct perf_event *event)
+static inline u64 xscale2pmu_read_counter(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
@@ -705,7 +705,7 @@ static inline u32 xscale2pmu_read_counter(struct perf_event *event)
 	return val;
 }
 
-static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
+static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int counter = hwc->idx;
arch/arm64/kernel/perf_event.c

@@ -512,7 +512,7 @@ static inline int armv8pmu_select_counter(int idx)
 	return idx;
 }
 
-static inline u32 armv8pmu_read_counter(struct perf_event *event)
+static inline u64 armv8pmu_read_counter(struct perf_event *event)
 {
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
@@ -530,7 +530,7 @@ static inline u32 armv8pmu_read_counter(struct perf_event *event)
 	return value;
 }
 
-static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
+static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
 {
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
@@ -545,9 +545,8 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
 		 * count using the lower 32bits and we want an interrupt when
		 * it overflows.
		 */
-		u64 value64 = 0xffffffff00000000ULL | value;
-
-		write_sysreg(value64, pmccntr_el0);
+		value |= 0xffffffff00000000ULL;
+		write_sysreg(value, pmccntr_el0);
 	} else if (armv8pmu_select_counter(idx) == idx)
 		write_sysreg(value, pmxevcntr_el0);
 }
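The last hunk above is the only change that is more than a signature widening: rather than building a separate value64, the now 64bit value has its upper word set to all ones before being written to PMCCNTR_EL0. As the in-code comment says, the cycle counter is 64 bits wide but perf drives it with a 32bit period, so seeding bits [63:32] with ones makes the 64bit counter overflow (and raise its interrupt) exactly when the low 32 bits wrap. A quick standalone arithmetic check of that equivalence, using a made-up starting value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lower  = 0xfffe7960;			 /* example 32bit start value */
	uint64_t seeded = 0xffffffff00000000ULL | lower; /* what the kernel writes */

	/* increments until the low 32bit word wraps ... */
	uint64_t until_32bit_wrap = 0x100000000ULL - lower;
	/* ... and until the whole 64bit counter wraps, i.e. the interrupt
	 * point (unsigned negation computes 2^64 - seeded) */
	uint64_t until_64bit_wrap = -seeded;

	printf("%llu == %llu\n",
	       (unsigned long long)until_32bit_wrap,
	       (unsigned long long)until_64bit_wrap);
	return 0;
}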
include/linux/perf/arm_pmu.h

@@ -87,8 +87,8 @@ struct arm_pmu {
 					 struct perf_event *event);
 	int		(*set_event_filter)(struct hw_perf_event *evt,
 					    struct perf_event_attr *attr);
-	u32		(*read_counter)(struct perf_event *event);
-	void		(*write_counter)(struct perf_event *event, u32 val);
+	u64		(*read_counter)(struct perf_event *event);
+	void		(*write_counter)(struct perf_event *event, u64 val);
 	void		(*start)(struct arm_pmu *);
 	void		(*stop)(struct arm_pmu *);
 	void		(*reset)(void *);
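With the header widened, the backends keep wiring up their callbacks exactly as before; only the prototypes they must match have changed. A sketch of the shape, reusing the armv8 functions from the diff above (the init function name and body here are abridged and illustrative, not the kernel's actual init code):

#include <linux/perf/arm_pmu.h>

/* Sketch of a backend init function after this change: the assigned
 * functions now have to match the u64 prototypes in struct arm_pmu. */
static int example_pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->read_counter	= armv8pmu_read_counter;  /* u64 (*)(struct perf_event *) */
	cpu_pmu->write_counter	= armv8pmu_write_counter; /* void (*)(struct perf_event *, u64) */
	return 0;
}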