tracing/perf: Expand TRACE_EVENT(sched_stat_runtime)
To simplify the review of the next patches:
1. We are going to reimplement __perf_task/counter and embed them
   into TP_ARGS(). Expand TRACE_EVENT(sched_stat_runtime) into
   DECLARE_EVENT_CLASS() + DEFINE_EVENT(), so that individual
   DEFINE_EVENT()s can use different TP_ARGS() (see the sketch
   after this list).
2. Change the perf_trace_##call() macro to do perf_fetch_caller_regs()
   right before perf_trace_buf_prepare(). This way it evaluates
   TP_ARGS() as early as possible; the next patch exploits this fact
   (a simplified model of the new ordering follows the
   perf_trace_##call() hunk below).
   Note: after 87f44bbc, perf_trace_buf_prepare() doesn't need
   "struct pt_regs *regs"; perhaps it makes sense to remove this
   argument. And perhaps we can teach perf_trace_buf_submit() to
   accept regs == NULL and do fetch_caller_regs(CALLER_ADDR1) in
   this case.
3. Cosmetic, but the typecast from "void *" buys nothing. It just
   adds noise; remove it.
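Below is a minimal userspace sketch (not kernel code) of point 1: once the
shared body lives in a "class", each instance is free to forward a different
TP_ARGS() expression to it. TP_PROTO/TP_ARGS are modeled as pass-through
macros, and DECLARE_CLASS, DEFINE_INSTANCE, stat_runtime and stat_runtime_us
are names invented for the example.

/*
 * Minimal userspace sketch (not kernel code) of the class/instance split.
 * All macro and event names here are made up for illustration.
 */
#include <stdio.h>

#define TP_PROTO(...)	__VA_ARGS__
#define TP_ARGS(...)	__VA_ARGS__

/* The "class": one shared body, parameterized by the prototype. */
#define DECLARE_CLASS(name, proto)				\
	static void class_##name(proto)

/* An "instance": a thin wrapper that forwards its (possibly rewritten)
 * argument list to the class body. */
#define DEFINE_INSTANCE(cls, name, proto, args)			\
	static void trace_##name(proto)				\
	{							\
		class_##cls(args);				\
	}

DECLARE_CLASS(stat_runtime,
	TP_PROTO(const char *comm, unsigned long long runtime))
{
	printf("%s: runtime=%llu\n", comm, runtime);
}

/* The "normal" instance forwards its arguments unchanged... */
DEFINE_INSTANCE(stat_runtime, stat_runtime,
	TP_PROTO(const char *comm, unsigned long long runtime),
	TP_ARGS(comm, runtime))

/* ...while another instance of the same class can pass a different
 * TP_ARGS() expression without duplicating the body. */
DEFINE_INSTANCE(stat_runtime, stat_runtime_us,
	TP_PROTO(const char *comm, unsigned long long runtime),
	TP_ARGS(comm, runtime / 1000))

int main(void)
{
	trace_stat_runtime("cc1", 2000000);
	trace_stat_runtime_us("cc1", 2000000);
	return 0;
}

This per-instance freedom over TP_ARGS() is exactly what the commit message
says the follow-up patches need in order to embed __perf_task/counter into
TP_ARGS() without touching the class body.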
Link: http://lkml.kernel.org/r/20130806160841.GA2736@redhat.com
Acked-by: Peter Zijlstra <peterz@infradead.org>
Tested-by: David Ahern <dsahern@gmail.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
commit 36009d07b7
parent d4e4ab86bc
@@ -372,7 +372,7 @@ DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
  */
-TRACE_EVENT(sched_stat_runtime,
+DECLARE_EVENT_CLASS(sched_stat_runtime,
 
 	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
 
@@ -401,6 +401,10 @@ TRACE_EVENT(sched_stat_runtime,
 		  (unsigned long long)__entry->vruntime)
 );
 
+DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
+	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+	     TP_ARGS(tsk, runtime, vruntime));
+
 /*
  * Tracepoint for showing priority inheritance modifying a tasks
  * priority.

@@ -663,15 +663,14 @@ perf_trace_##call(void *__data, proto)			\
 	int __data_size;						\
 	int rctx;							\
 									\
-	perf_fetch_caller_regs(&__regs);				\
-									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
-		__entry_size, event_call->event.type, &__regs, &rctx);	\
+	perf_fetch_caller_regs(&__regs);				\
+	entry = perf_trace_buf_prepare(__entry_size,			\
+			event_call->event.type, &__regs, &rctx);	\
 	if (!entry)							\
 		return;							\
 									\
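
For point 2, here is a simplified userspace model of the perf_trace_##call()
body produced after this patch. It only demonstrates the new ordering
(argument evaluation and size calculation first, register snapshot and buffer
reservation afterwards); struct pt_regs, perf_fetch_caller_regs() and
perf_trace_buf_prepare() below are trivial stand-ins, not the kernel
implementations.

/*
 * Simplified userspace model of the perf_trace_##call() body after this
 * patch.  The helpers are stand-ins for the kernel functions of the same
 * name, not their real code.
 */
#include <stdio.h>
#include <string.h>

struct pt_regs { unsigned long ip; };		/* stand-in */

static void perf_fetch_caller_regs(struct pt_regs *regs)
{
	regs->ip = 0x1234;	/* pretend we snapshotted the caller */
}

static char trace_buf[256];

static void *perf_trace_buf_prepare(int size, int type,
				    struct pt_regs *regs, int *rctx)
{
	(void)type;
	(void)regs;
	*rctx = 0;
	return size <= (int)sizeof(trace_buf) ? trace_buf : NULL;
}

/* Model of the generated function: the point is the order of the steps. */
static void perf_trace_sample(const char *payload)
{
	struct pt_regs __regs;
	int __entry_size, rctx;
	void *entry;

	/* 1. Evaluate the event arguments and derive the record size
	 *    first; this is where TP_ARGS() is consumed.		*/
	__entry_size = (int)strlen(payload) + 1;

	/* 2. Only then snapshot the registers and reserve the buffer,
	 *    which is the ordering this patch introduces.		*/
	perf_fetch_caller_regs(&__regs);
	entry = perf_trace_buf_prepare(__entry_size, 0, &__regs, &rctx);
	if (!entry)
		return;

	memcpy(entry, payload, (size_t)__entry_size);
	printf("recorded %d bytes, ip=%#lx\n", __entry_size, __regs.ip);
}

int main(void)
{
	perf_trace_sample("sched_stat_runtime sample");
	return 0;
}

Fetching the registers only after the arguments have been evaluated is what
the commit message means by evaluating TP_ARGS() as early as possible, and is
the property the next patch builds on.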