perf_counter: there's more to overflow than writing events
Prepare for more generic overflow handling. The new perf_counter_overflow()
method will handle the generic bits of the counter overflow, and can return
a non-zero value, in which case the counter should be (soft) disabled so
that it won't count until it is properly disabled.

XXX: do powerpc and swcounter

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094517.812109629@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit f6c7d5fe58 (parent b6276f353b)
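For illustration only (not part of the commit): a minimal, self-contained C sketch of the calling convention the commit message describes. Every name here (toy_counter, toy_counter_overflow, toy_overflow_irq, max_events) is invented for the example and is not kernel code; the point is simply that the generic overflow helper emits the event and returns non-zero when the caller should soft-disable the counter.

    #include <stdio.h>

    /* Illustrative stand-ins only -- not the kernel structures. */
    struct toy_counter {
            unsigned long long events;     /* overflows seen so far */
            unsigned long long max_events; /* soft-disable after this many */
            int active;                    /* 1 = counting, 0 = soft-disabled */
    };

    /*
     * Generic overflow handling: write out the event and return non-zero
     * when the counter should be (soft) disabled by the caller.
     */
    static int toy_counter_overflow(struct toy_counter *c)
    {
            printf("overflow #%llu\n", c->events);
            return c->events >= c->max_events;
    }

    /*
     * Per-"architecture" interrupt path, in the spirit of the x86 hunk
     * below, where the counter is disabled only when the overflow helper
     * returns non-zero.
     */
    static void toy_overflow_irq(struct toy_counter *c)
    {
            if (!c->active)
                    return;
            c->events++;
            if (toy_counter_overflow(c))
                    c->active = 0;          /* soft-disable: stop counting */
    }

    int main(void)
    {
            struct toy_counter c = { 0, 3, 1 };

            for (int i = 0; i < 6; i++)
                    toy_overflow_irq(&c);

            printf("counter is %s after %llu overflows\n",
                   c.active ? "still active" : "soft-disabled", c.events);
            return 0;
    }

In the patch itself, the x86 interrupt handler takes the place of toy_overflow_irq(): it calls __pmc_generic_disable() whenever perf_counter_overflow() returns non-zero. The powerpc call site still ignores the return value and perf_swcounter_overflow() only carries a placeholder comment, which is what the "XXX: do powerpc and swcounter" note refers to.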
@@ -732,7 +732,7 @@ static void record_and_restart(struct perf_counter *counter, long val,
          * Finally record data if requested.
          */
         if (record)
-                perf_counter_output(counter, 1, regs);
+                perf_counter_overflow(counter, 1, regs);
 }

 /*
@@ -800,7 +800,8 @@ again:
                         continue;

                 perf_save_and_restart(counter);
-                perf_counter_output(counter, nmi, regs);
+                if (perf_counter_overflow(counter, nmi, regs))
+                        __pmc_generic_disable(counter, &counter->hw, bit);
         }

         hw_perf_ack_status(ack);
@@ -491,8 +491,8 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
                struct perf_counter_context *ctx, int cpu);
 extern void perf_counter_update_userpage(struct perf_counter *counter);

-extern void perf_counter_output(struct perf_counter *counter,
-                                int nmi, struct pt_regs *regs);
+extern int perf_counter_overflow(struct perf_counter *counter,
+                                 int nmi, struct pt_regs *regs);
 /*
  * Return 1 for a software counter, 0 for a hardware counter
  */
@@ -1800,8 +1800,8 @@ static void perf_output_end(struct perf_output_handle *handle)
         rcu_read_unlock();
 }

-void perf_counter_output(struct perf_counter *counter,
-                         int nmi, struct pt_regs *regs)
+static void perf_counter_output(struct perf_counter *counter,
+                                int nmi, struct pt_regs *regs)
 {
         int ret;
         u64 record_type = counter->hw_event.record_type;
@@ -2033,6 +2033,17 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
         perf_counter_mmap_event(&mmap_event);
 }

+/*
+ * Generic counter overflow handling.
+ */
+
+int perf_counter_overflow(struct perf_counter *counter,
+                          int nmi, struct pt_regs *regs)
+{
+        perf_counter_output(counter, nmi, regs);
+        return 0;
+}
+
 /*
  * Generic software counter infrastructure
  */
@@ -2077,6 +2088,7 @@ static void perf_swcounter_set_period(struct perf_counter *counter)

 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 {
+        enum hrtimer_restart ret = HRTIMER_RESTART;
         struct perf_counter *counter;
         struct pt_regs *regs;

@@ -2092,12 +2104,14 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
                         !counter->hw_event.exclude_user)
                 regs = task_pt_regs(current);

-        if (regs)
-                perf_counter_output(counter, 0, regs);
+        if (regs) {
+                if (perf_counter_overflow(counter, 0, regs))
+                        ret = HRTIMER_NORESTART;
+        }

         hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));

-        return HRTIMER_RESTART;
+        return ret;
 }

 static void perf_swcounter_overflow(struct perf_counter *counter,
@@ -2105,7 +2119,10 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 {
         perf_swcounter_update(counter);
         perf_swcounter_set_period(counter);
-        perf_counter_output(counter, nmi, regs);
+        if (perf_counter_overflow(counter, nmi, regs))
+                /* soft-disable the counter */
+                ;
+
 }

 static int perf_swcounter_match(struct perf_counter *counter,