2019-08-29 14:57:00 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
|
|
|
|
|
|
|
|
#include <linux/perf_event.h>
|
|
|
|
#include <linux/uaccess.h>
|
|
|
|
|
2020-11-13 14:42:21 +08:00
|
|
|
#include <asm/stacktrace.h>
|
2019-08-29 14:57:00 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Get the return address for a single stackframe and return a pointer to the
|
|
|
|
* next frame tail.
|
|
|
|
*/
|
|
|
|
static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
|
|
|
|
unsigned long fp, unsigned long reg_ra)
|
|
|
|
{
|
|
|
|
struct stackframe buftail;
|
|
|
|
unsigned long ra = 0;
|
2022-02-15 03:22:10 +08:00
|
|
|
unsigned long __user *user_frame_tail =
|
|
|
|
(unsigned long __user *)(fp - sizeof(struct stackframe));
|
2019-08-29 14:57:00 +08:00
|
|
|
|
|
|
|
/* Check accessibility of one struct frame_tail beyond */
|
|
|
|
if (!access_ok(user_frame_tail, sizeof(buftail)))
|
|
|
|
return 0;
|
|
|
|
if (__copy_from_user_inatomic(&buftail, user_frame_tail,
|
|
|
|
sizeof(buftail)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (reg_ra != 0)
|
|
|
|
ra = reg_ra;
|
|
|
|
else
|
|
|
|
ra = buftail.ra;
|
|
|
|
|
|
|
|
fp = buftail.fp;
|
|
|
|
if (ra != 0)
|
|
|
|
perf_callchain_store(entry, ra);
|
|
|
|
else
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return fp;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This will be called when the target is in user mode
|
|
|
|
* This function will only be called when we use
|
|
|
|
* "PERF_SAMPLE_CALLCHAIN" in
|
|
|
|
* kernel/events/core.c:perf_prepare_sample()
|
|
|
|
*
|
|
|
|
* How to trigger perf_callchain_[user/kernel] :
|
|
|
|
* $ perf record -e cpu-clock --call-graph fp ./program
|
|
|
|
* $ perf report --call-graph
|
|
|
|
*
|
|
|
|
* On RISC-V platform, the program being sampled and the C library
|
|
|
|
* need to be compiled with -fno-omit-frame-pointer, otherwise
|
|
|
|
* the user stack will not contain function frame.
|
|
|
|
*/
|
|
|
|
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
|
|
|
|
struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
unsigned long fp = 0;
|
|
|
|
|
|
|
|
fp = regs->s0;
|
2019-10-28 20:10:32 +08:00
|
|
|
perf_callchain_store(entry, regs->epc);
|
2019-08-29 14:57:00 +08:00
|
|
|
|
|
|
|
fp = user_backtrace(entry, fp, regs->ra);
|
|
|
|
while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
|
|
|
|
fp = user_backtrace(entry, fp, 0);
|
|
|
|
}
|
|
|
|
|
2020-11-13 14:42:22 +08:00
|
|
|
static bool fill_callchain(void *entry, unsigned long pc)
|
2019-08-29 14:57:00 +08:00
|
|
|
{
|
2022-03-11 14:58:15 +08:00
|
|
|
return perf_callchain_store(entry, pc) == 0;
|
2019-08-29 14:57:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Record a kernel-mode callchain sample by walking the kernel stack
 * from the trapped register state, storing each PC via fill_callchain().
 */
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	/* NULL task means "unwind from regs of the current task". */
	walk_stackframe(NULL, regs, fill_callchain, entry);
}
|