powerpc/ftrace: Separate the heuristics for checking call sites
In __ftrace_make_nop() (the 64-bit version), we have code to deal with
two ftrace ABIs. There is the original ABI, which looks mostly like a
function call, and then the mprofile-kernel ABI which is just a branch.

The code tries to handle both cases, by looking for the presence of a
load to restore the TOC pointer (PPC_INST_LD_TOC). If we detect the TOC
load, we assume the call site is for an mcount() call using the old ABI.
That means we patch the mcount() call with a b +8, to branch over the
TOC load.

However if the kernel was built with mprofile-kernel, then there will
never be a call site using the original ftrace ABI. If for some reason
we do see a TOC load, then it's there for a good reason, and we should
not jump over it.

So split the code, using the existing CC_USING_MPROFILE_KERNEL. Kernels
built with mprofile-kernel will only look for, and expect, the new ABI,
and similarly for the original ABI.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit 9d63610951
parent b1923caa6e
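Before the diff itself, a minimal stand-alone C sketch of the distinction the change encodes. This is an illustration, not the kernel code: nop_replacement() and built_with_mprofile_kernel are made-up names for this example; the encodings are the standard PowerPC nop and relative-branch forms, and the call-site shapes follow the description in the commit message above.

/*
 * Illustration only -- not the kernel implementation.
 *
 * mprofile-kernel call site:        original -pg call site:
 *     mflr  r0                          mflr r0 / std r0,LRSAVE(r1)
 *     bl    _mcount                     bl    _mcount
 *                                       ld    r2,...(r1)   <- TOC restore
 *
 * With mprofile-kernel there is nothing after the bl that has to stay
 * reachable, so the bl becomes a plain nop. Under the original ABI the
 * bl must become "b +8" so the TOC restore is skipped as well.
 */
#include <stdint.h>
#include <stdio.h>

#define PPC_INST_NOP	0x60000000u	/* nop (ori r0,r0,0) */
#define PPC_INST_BRANCH	0x48000000u	/* b <relative offset> */

static uint32_t nop_replacement(int built_with_mprofile_kernel)
{
	if (built_with_mprofile_kernel)
		return PPC_INST_NOP;		/* just drop the call */

	return PPC_INST_BRANCH | 8;		/* b +8: skip the ld r2,...(r1) */
}

int main(void)
{
	printf("mprofile-kernel: replace bl _mcount with %08x\n",
	       (unsigned int)nop_replacement(1));
	printf("original ABI:    replace bl _mcount with %08x\n",
	       (unsigned int)nop_replacement(0));
	return 0;
}

The real code makes the same choice at build time via #ifdef CC_USING_MPROFILE_KERNEL rather than at run time, since a kernel built one way only ever contains call sites of that one ABI.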
@@ -144,6 +144,21 @@ __ftrace_make_nop(struct module *mod,
 		return -EINVAL;
 	}
 
+#ifdef CC_USING_MPROFILE_KERNEL
+	/* When using -mkernel_profile there is no load to jump over */
+	pop = PPC_INST_NOP;
+
+	if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
+		pr_err("Fetching instruction at %lx failed.\n", ip - 4);
+		return -EFAULT;
+	}
+
+	/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
+	if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
+		pr_err("Unexpected instruction %08x around bl _mcount\n", op);
+		return -EINVAL;
+	}
+#else
 	/*
 	 * Our original call site looks like:
 	 *
@@ -170,24 +185,10 @@ __ftrace_make_nop(struct module *mod,
 	}
 
 	if (op != PPC_INST_LD_TOC) {
-		unsigned int inst;
-
-		if (probe_kernel_read(&inst, (void *)(ip - 4), 4)) {
-			pr_err("Fetching instruction at %lx failed.\n", ip - 4);
-			return -EFAULT;
-		}
-
-		/* We expect either a mlfr r0, or a std r0, LRSAVE(r1) */
-		if (inst != PPC_INST_MFLR && inst != PPC_INST_STD_LR) {
-			pr_err("Unexpected instructions around bl _mcount\n"
-			       "when enabling dynamic ftrace!\t"
-			       "(%08x,bl,%08x)\n", inst, op);
-			return -EINVAL;
-		}
-
-		/* When using -mkernel_profile there is no load to jump over */
-		pop = PPC_INST_NOP;
+		pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
+		return -EINVAL;
 	}
+#endif /* CC_USING_MPROFILE_KERNEL */
 
 	if (patch_instruction((unsigned int *)ip, pop)) {
 		pr_err("Patching NOP failed.\n");