perf intel-pt: Insert callchain context into synthesized callchains
In the absence of a fallback, callchains must encode also the callchain context. Do that now there is no fallback. Signed-off-by: Adrian Hunter <adrian.hunter@intel.com> Reviewed-by: Jiri Olsa <jolsa@kernel.org> Cc: Andi Kleen <ak@linux.intel.com> Cc: David S. Miller <davem@davemloft.net> Cc: Leo Yan <leo.yan@linaro.org> Cc: Mathieu Poirier <mathieu.poirier@linaro.org> Cc: stable@vger.kernel.org # 4.19 Link: http://lkml.kernel.org/r/100ea2ec-ed14-b56d-d810-e0a6d2f4b069@intel.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in: parent 4f8f382e63, commit 242483068b.
@ -759,7 +759,8 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
|
|||
if (pt->synth_opts.callchain) {
|
||||
size_t sz = sizeof(struct ip_callchain);
|
||||
|
||||
sz += pt->synth_opts.callchain_sz * sizeof(u64);
|
||||
/* Add 1 to callchain_sz for callchain context */
|
||||
sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
|
||||
ptq->chain = zalloc(sz);
|
||||
if (!ptq->chain)
|
||||
goto out_free;
|
||||
|
@ -1160,7 +1161,8 @@ static void intel_pt_prep_sample(struct intel_pt *pt,
|
|||
|
||||
if (pt->synth_opts.callchain) {
|
||||
thread_stack__sample(ptq->thread, ptq->chain,
|
||||
pt->synth_opts.callchain_sz, sample->ip);
|
||||
pt->synth_opts.callchain_sz + 1,
|
||||
sample->ip, pt->kernel_start);
|
||||
sample->callchain = ptq->chain;
|
||||
}
|
||||
|
||||
|
|
|
@ -310,20 +310,46 @@ void thread_stack__free(struct thread *thread)
|
|||
}
|
||||
}
|
||||
|
||||
void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
|
||||
size_t sz, u64 ip)
|
||||
/*
 * callchain_context() - classify an instruction pointer for callchain output.
 * @ip: instruction pointer to classify
 * @kernel_start: lowest kernel virtual address
 *
 * Synthesized callchains must carry perf context markers so that consumers
 * can tell user-space frames from kernel frames.  An address below the start
 * of the kernel mapping is taken to be user space; everything at or above it
 * is kernel space.
 *
 * Return: PERF_CONTEXT_USER or PERF_CONTEXT_KERNEL.
 */
static inline u64 callchain_context(u64 ip, u64 kernel_start)
{
	return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
}
|
||||
|
||||
if (!thread || !thread->ts)
|
||||
chain->nr = 1;
|
||||
else
|
||||
chain->nr = min(sz, thread->ts->cnt + 1);
|
||||
void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
|
||||
size_t sz, u64 ip, u64 kernel_start)
|
||||
{
|
||||
u64 context = callchain_context(ip, kernel_start);
|
||||
u64 last_context;
|
||||
size_t i, j;
|
||||
|
||||
chain->ips[0] = ip;
|
||||
if (sz < 2) {
|
||||
chain->nr = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 1; i < chain->nr; i++)
|
||||
chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr;
|
||||
chain->ips[0] = context;
|
||||
chain->ips[1] = ip;
|
||||
|
||||
if (!thread || !thread->ts) {
|
||||
chain->nr = 2;
|
||||
return;
|
||||
}
|
||||
|
||||
last_context = context;
|
||||
|
||||
for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) {
|
||||
ip = thread->ts->stack[thread->ts->cnt - j].ret_addr;
|
||||
context = callchain_context(ip, kernel_start);
|
||||
if (context != last_context) {
|
||||
if (i >= sz - 1)
|
||||
break;
|
||||
chain->ips[i++] = context;
|
||||
last_context = context;
|
||||
}
|
||||
chain->ips[i] = ip;
|
||||
}
|
||||
|
||||
chain->nr = i;
|
||||
}
|
||||
|
||||
struct call_return_processor *
|
||||
|
|
|
@ -84,7 +84,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
|
|||
u64 to_ip, u16 insn_len, u64 trace_nr);
|
||||
void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr);
|
||||
void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
|
||||
size_t sz, u64 ip);
|
||||
size_t sz, u64 ip, u64 kernel_start);
|
||||
int thread_stack__flush(struct thread *thread);
|
||||
void thread_stack__free(struct thread *thread);
|
||||
size_t thread_stack__depth(struct thread *thread);
|
||||
|
|
Loading…
Reference in New Issue