tracing/kprobe: bpf: Check error injectable event is on function entry
Check whether an error injectable event is on function entry or not. Currently it checks whether the event is an ftrace-based kprobe, but that is wrong. It should check whether the event is on the entry of the target function. Since error injection overrides a function to just return with a modified return value, that operation must be done before the target function starts making its stack frame.

As a side effect, bpf error injection no longer needs to depend on the function tracer; it can work with software-breakpoint based kprobe events too.

Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit b4da3340ea
parent daaf24c634
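For context, a minimal sketch of what a BPF program using this override path looks like. This is illustrative and not part of this commit: the "open_ctree" target and the -ENOMEM value are example choices, the target function must be on the error-injection opt-in list, and CONFIG_BPF_KPROBE_OVERRIDE must be enabled.

/* Illustrative sketch only -- not from this commit. */
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/open_ctree")
int override_open_ctree(struct pt_regs *ctx)
{
	/* Plant -ENOMEM as the return value and divert execution
	 * before the probed function builds its stack frame. */
	bpf_override_return(ctx, (unsigned long)-12 /* -ENOMEM */);
	return 0;
}

char _license[] SEC("license") = "GPL";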
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -67,9 +67,7 @@ extern const int kretprobe_blacklist_size;
 void arch_remove_kprobe(struct kprobe *p);
 asmlinkage void kretprobe_trampoline(void);
 
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern void arch_ftrace_kprobe_override_function(struct pt_regs *regs);
-#endif
+extern void arch_kprobe_override_function(struct pt_regs *regs);
 
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1183,3 +1183,17 @@ int arch_trampoline_kprobe(struct kprobe *p)
 {
 	return 0;
 }
+
+asmlinkage void override_func(void);
+asm(
+	".type override_func, @function\n"
+	"override_func:\n"
+	"	ret\n"
+	".size override_func, .-override_func\n"
+);
+
+void arch_kprobe_override_function(struct pt_regs *regs)
+{
+	regs->ip = (unsigned long)&override_func;
+}
+NOKPROBE_SYMBOL(arch_kprobe_override_function);
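This is why the redirect is only safe at function entry: at the first instruction of the probed function, the top of the stack still holds the caller's return address, so override_func's single "ret" pops it and returns straight to the caller with the value already planted in the return-value register. A sketch of what a compiler typically emits (illustrative, not from this commit) shows what would break any later:

/* Illustrative, not from this commit: a typical x86-64 prologue.
 * Only at <target_func+0> is the stack top the caller's return
 * address; after the push, a bare "ret" would treat the saved
 * %rbp as a return address.
 *
 *   target_func:
 *     push %rbp
 *     mov  %rsp, %rbp
 *     sub  $0x20, %rsp
 *     ...
 */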
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -97,17 +97,3 @@ int arch_prepare_kprobe_ftrace(struct kprobe *p)
 	p->ainsn.boostable = false;
 	return 0;
 }
-
-asmlinkage void override_func(void);
-asm(
-	".type override_func, @function\n"
-	"override_func:\n"
-	"	ret\n"
-	".size override_func, .-override_func\n"
-);
-
-void arch_ftrace_kprobe_override_function(struct pt_regs *regs)
-{
-	regs->ip = (unsigned long)&override_func;
-}
-NOKPROBE_SYMBOL(arch_ftrace_kprobe_override_function);
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -533,9 +533,7 @@ config FUNCTION_PROFILER
 config BPF_KPROBE_OVERRIDE
 	bool "Enable BPF programs to override a kprobed function"
 	depends on BPF_EVENTS
-	depends on KPROBES_ON_FTRACE
 	depends on HAVE_KPROBE_OVERRIDE
-	depends on DYNAMIC_FTRACE_WITH_REGS
 	default n
 	help
 	 Allows BPF to override the execution of a probed function and
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -85,7 +85,7 @@ BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 {
 	__this_cpu_write(bpf_kprobe_override, 1);
 	regs_set_return_value(regs, rc);
-	arch_ftrace_kprobe_override_function(regs);
+	arch_kprobe_override_function(regs);
 	return 0;
 }
 
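The helper pairs the IP redirect with regs_set_return_value(), which on x86 simply stores rc in the register that carries function return values, so the caller observes rc once override_func returns. Paraphrased from arch/x86/include/asm/ptrace.h:

static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
	regs->ax = rc;
}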
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -800,11 +800,11 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
 	int ret = -EEXIST;
 
 	/*
-	 * Kprobe override only works for ftrace based kprobes, and only if they
-	 * are on the opt-in list.
+	 * Kprobe override only works if they are on the function entry,
+	 * and only if they are on the opt-in list.
 	 */
 	if (prog->kprobe_override &&
-	    (!trace_kprobe_ftrace(event->tp_event) ||
+	    (!trace_kprobe_on_func_entry(event->tp_event) ||
 	     !trace_kprobe_error_injectable(event->tp_event)))
 		return -EINVAL;
 
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -88,13 +88,16 @@ static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
 	return nhit;
 }
 
-int trace_kprobe_ftrace(struct trace_event_call *call)
+bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 {
 	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
-	return kprobe_ftrace(&tk->rp.kp);
+
+	return kprobe_on_func_entry(tk->rp.kp.addr,
+			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
+			tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
 }
 
-int trace_kprobe_error_injectable(struct trace_event_call *call)
+bool trace_kprobe_error_injectable(struct trace_event_call *call)
 {
 	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
 	unsigned long addr;
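The addr/symbol selection in the new trace_kprobe_on_func_entry() mirrors the two ways a kprobe event can be defined. A hypothetical example of each (event and symbol names are illustrative, not from this commit):

/* "p:my_ev do_sys_open"    -> kp.addr = NULL, symbol/offset used:
 *     kprobe_on_func_entry(NULL, "do_sys_open", 0)   -> true
 * "p:my_ev do_sys_open+16" -> offset 16, past the entry on x86:
 *     kprobe_on_func_entry(NULL, "do_sys_open", 16)  -> false
 * "p:my_ev 0x<addr>"       -> kp.addr set, symbol ignored:
 *     kprobe_on_func_entry(kp.addr, NULL, 0)
 */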
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -252,8 +252,8 @@ struct symbol_cache;
 unsigned long update_symbol_cache(struct symbol_cache *sc);
 void free_symbol_cache(struct symbol_cache *sc);
 struct symbol_cache *alloc_symbol_cache(const char *sym, long offset);
-int trace_kprobe_ftrace(struct trace_event_call *call);
-int trace_kprobe_error_injectable(struct trace_event_call *call);
+bool trace_kprobe_on_func_entry(struct trace_event_call *call);
+bool trace_kprobe_error_injectable(struct trace_event_call *call);
 #else
 /* uprobes do not support symbol fetch methods */
 #define fetch_symbol_u8			NULL
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -280,14 +280,14 @@ alloc_symbol_cache(const char *sym, long offset)
 	return NULL;
 }
 
-static inline int trace_kprobe_ftrace(struct trace_event_call *call)
+static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 {
-	return 0;
+	return false;
 }
 
-static inline int trace_kprobe_error_injectable(struct trace_event_call *call)
+static inline bool trace_kprobe_error_injectable(struct trace_event_call *call)
 {
-	return 0;
+	return false;
 }
 #endif /* CONFIG_KPROBE_EVENTS */
 