2020-10-11 11:03:51 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2017-12-18 17:52:48 +08:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2013 Linaro Limited
|
|
|
|
* Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
|
|
|
|
* Copyright (C) 2017 Andes Technology Corporation
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/ftrace.h>
|
2018-02-13 13:13:17 +08:00
|
|
|
#include <linux/uaccess.h>
|
2020-04-21 15:30:01 +08:00
|
|
|
#include <linux/memory.h>
|
2018-02-13 13:13:17 +08:00
|
|
|
#include <asm/cacheflush.h>
|
2020-03-10 00:55:44 +08:00
|
|
|
#include <asm/patch.h>
|
2017-12-18 17:52:48 +08:00
|
|
|
|
2018-02-13 13:13:17 +08:00
|
|
|
#ifdef CONFIG_DYNAMIC_FTRACE
|
2020-04-21 15:30:01 +08:00
|
|
|
/*
 * Called by ftrace core before a batch of text patches.  Takes text_mutex
 * so no other text patcher can touch the kernel text while call sites are
 * being rewritten; released in ftrace_arch_code_modify_post_process().
 */
int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Called by ftrace core after a batch of text patches; drops the
 * text_mutex taken in ftrace_arch_code_modify_prepare().
 */
int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	mutex_unlock(&text_mutex);
	return 0;
}
|
|
|
|
|
2018-02-13 13:13:17 +08:00
|
|
|
static int ftrace_check_current_call(unsigned long hook_pos,
|
|
|
|
unsigned int *expected)
|
|
|
|
{
|
|
|
|
unsigned int replaced[2];
|
|
|
|
unsigned int nops[2] = {NOP4, NOP4};
|
|
|
|
|
|
|
|
/* we expect nops at the hook position */
|
|
|
|
if (!expected)
|
|
|
|
expected = nops;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read the text we want to modify;
|
|
|
|
* return must be -EFAULT on read error
|
|
|
|
*/
|
2020-06-17 15:37:53 +08:00
|
|
|
if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
|
|
|
|
MCOUNT_INSN_SIZE))
|
2018-02-13 13:13:17 +08:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make sure it is what we expect it to be;
|
|
|
|
* return must be -EINVAL on failed comparison
|
|
|
|
*/
|
|
|
|
if (memcmp(expected, replaced, sizeof(replaced))) {
|
2019-01-18 22:03:04 +08:00
|
|
|
pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
|
2018-02-13 13:13:17 +08:00
|
|
|
(void *)hook_pos, expected[0], expected[1], replaced[0],
|
|
|
|
replaced[1]);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
|
|
|
|
bool enable)
|
|
|
|
{
|
|
|
|
unsigned int call[2];
|
|
|
|
unsigned int nops[2] = {NOP4, NOP4};
|
|
|
|
|
|
|
|
make_call(hook_pos, target, call);
|
|
|
|
|
2020-03-10 00:55:44 +08:00
|
|
|
/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
|
2020-04-21 15:29:59 +08:00
|
|
|
if (patch_text_nosync
|
2020-03-10 00:55:44 +08:00
|
|
|
((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
|
2018-02-13 13:13:17 +08:00
|
|
|
return -EPERM;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
/*
 * Put 5 instructions with 16 bytes at the front of function within
 * patchable function entry nops' area.
 *
 * 0: REG_S ra, -SZREG(sp)
 * 1: auipc ra, 0x?
 * 2: jalr -?(ra)
 * 3: REG_L ra, -SZREG(sp)
 *
 * So the opcodes is:
 * 0: 0xfe113c23 (sd)/0xfe112e23 (sw)
 * 1: 0x???????? -> auipc
 * 2: 0x???????? -> jalr
 * 3: 0xff813083 (ld)/0xffc12083 (lw)
 */
#if __riscv_xlen == 64
/* RV64: sd/ld of ra at sp-8 */
#define INSN0	0xfe113c23
#define INSN3	0xff813083
#elif __riscv_xlen == 32
/* RV32: sw/lw of ra at sp-4 */
#define INSN0	0xfe112e23
#define INSN3	0xffc12083
#endif

/* Total patched area (4 instructions) and offset of the auipc/jalr pair. */
#define FUNC_ENTRY_SIZE	16
#define FUNC_ENTRY_JMP	4
|
|
|
|
|
2018-02-13 13:13:17 +08:00
|
|
|
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
|
|
|
{
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
unsigned int call[4] = {INSN0, 0, 0, INSN3};
|
|
|
|
unsigned long target = addr;
|
|
|
|
unsigned long caller = rec->ip + FUNC_ENTRY_JMP;
|
2018-02-13 13:13:17 +08:00
|
|
|
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
call[1] = to_auipc_insn((unsigned int)(target - caller));
|
|
|
|
call[2] = to_jalr_insn((unsigned int)(target - caller));
|
2018-02-13 13:13:17 +08:00
|
|
|
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
if (patch_text_nosync((void *)rec->ip, call, FUNC_ENTRY_SIZE))
|
|
|
|
return -EPERM;
|
|
|
|
|
|
|
|
return 0;
|
2018-02-13 13:13:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
|
|
|
|
unsigned long addr)
|
|
|
|
{
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
unsigned int nops[4] = {NOP4, NOP4, NOP4, NOP4};
|
2018-02-13 13:13:17 +08:00
|
|
|
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
if (patch_text_nosync((void *)rec->ip, nops, FUNC_ENTRY_SIZE))
|
|
|
|
return -EPERM;
|
2018-02-13 13:13:17 +08:00
|
|
|
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
return 0;
|
2018-02-13 13:13:17 +08:00
|
|
|
}
|
|
|
|
|
2020-08-25 08:21:22 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* This is called early on, and isn't wrapped by
|
|
|
|
* ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
|
|
|
|
* text_mutex, which triggers a lockdep failure. SMP isn't running so we could
|
|
|
|
* just directly poke the text, but it's simpler to just take the lock
|
|
|
|
* ourselves.
|
|
|
|
*/
|
|
|
|
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
|
|
|
|
{
|
|
|
|
int out;
|
|
|
|
|
|
|
|
ftrace_arch_code_modify_prepare();
|
|
|
|
out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
|
|
|
|
ftrace_arch_code_modify_post_process();
|
|
|
|
|
|
|
|
return out;
|
|
|
|
}
|
|
|
|
|
2018-02-13 13:13:17 +08:00
|
|
|
/*
 * Point both ftrace trampoline call sites (ftrace_call and
 * ftrace_regs_call) at @func.  Stops at the first failure and
 * returns its error code; 0 when both sites were patched.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret;

	ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				   (unsigned long)func, true);
	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_regs_call,
				    (unsigned long)func, true);
}
|
|
|
|
|
|
|
|
/* No arch-specific dynamic-ftrace initialization is needed on RISC-V. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
|
|
|
|
#endif
|
|
|
|
|
2018-02-13 13:13:20 +08:00
|
|
|
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
|
|
|
|
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
|
|
|
|
unsigned long addr)
|
|
|
|
{
|
|
|
|
unsigned int call[2];
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
unsigned long caller = rec->ip + FUNC_ENTRY_JMP;
|
2018-02-13 13:13:20 +08:00
|
|
|
int ret;
|
|
|
|
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
make_call(caller, old_addr, call);
|
|
|
|
ret = ftrace_check_current_call(caller, call);
|
2018-02-13 13:13:20 +08:00
|
|
|
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
return __ftrace_modify_call(caller, addr, true);
|
2018-02-13 13:13:20 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2018-02-13 13:13:17 +08:00
|
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
2017-12-18 17:52:48 +08:00
|
|
|
/*
|
2018-02-13 13:13:17 +08:00
|
|
|
* Most of this function is copied from arm64.
|
2017-12-18 17:52:48 +08:00
|
|
|
*/
|
|
|
|
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
|
|
|
|
unsigned long frame_pointer)
|
|
|
|
{
|
|
|
|
unsigned long return_hooker = (unsigned long)&return_to_handler;
|
|
|
|
unsigned long old;
|
|
|
|
|
|
|
|
if (unlikely(atomic_read(¤t->tracing_graph_pause)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We don't suffer access faults, so no extra fault-recovery assembly
|
|
|
|
* is needed here.
|
|
|
|
*/
|
|
|
|
old = *parent;
|
|
|
|
|
2019-12-23 16:46:13 +08:00
|
|
|
if (!function_graph_enter(old, self_addr, frame_pointer, parent))
|
2018-11-19 06:31:44 +08:00
|
|
|
*parent = return_hooker;
|
2017-12-18 17:52:48 +08:00
|
|
|
}
|
2018-02-13 13:13:18 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_DYNAMIC_FTRACE
|
|
|
|
extern void ftrace_graph_call(void);
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
extern void ftrace_graph_regs_call(void);
|
2018-02-13 13:13:18 +08:00
|
|
|
int ftrace_enable_ftrace_graph_caller(void)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
|
|
|
|
(unsigned long)&prepare_ftrace_return, true);
|
2018-02-13 13:13:18 +08:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
|
2018-02-13 13:13:18 +08:00
|
|
|
(unsigned long)&prepare_ftrace_return, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
int ftrace_disable_ftrace_graph_caller(void)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
|
|
|
|
(unsigned long)&prepare_ftrace_return, false);
|
2018-02-13 13:13:18 +08:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
riscv: Using PATCHABLE_FUNCTION_ENTRY instead of MCOUNT
This patch changes the current detour mechanism of dynamic ftrace
which has been discussed during LPC 2020 RISCV-MC [1].
Before the patch, we used mcount for detour:
<funca>:
addi sp,sp,-16
sd ra,8(sp)
sd s0,0(sp)
addi s0,sp,16
mv a5,ra
mv a0,a5
auipc ra,0x0 -> nop
jalr -296(ra) <_mcount@plt> ->nop
...
After the patch, we use nop call site area for detour:
<funca>:
nop -> REG_S ra, -SZREG(sp)
nop -> auipc ra, 0x?
nop -> jalr ?(ra)
nop -> REG_L ra, -SZREG(sp)
...
The mcount mechanism is mixed with gcc function prologue which is
not very clear. The patchable function entry just put 16 bytes nop
before the front of the function prologue which could be filled
with a separated detour mechanism.
[1] https://www.linuxplumbersconf.org/event/7/contributions/807/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-12-18 00:01:41 +08:00
|
|
|
return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
|
2018-02-13 13:13:18 +08:00
|
|
|
(unsigned long)&prepare_ftrace_return, false);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_DYNAMIC_FTRACE */
|
|
|
|
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|