bpf: disable preemption for bpf progs attached to uprobe
trace_call_bpf() no longer disables preemption on its own. All callers of this function have to do it explicitly. Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
1b7a51a63b
commit
70ed0706a4
@@ -1333,8 +1333,15 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
 	int size, esize;
 	int rctx;
 
-	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
-		return;
+	if (bpf_prog_array_valid(call)) {
+		u32 ret;
+
+		preempt_disable();
+		ret = trace_call_bpf(call, regs);
+		preempt_enable();
+		if (!ret)
+			return;
+	}
 
 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
Loading…
Reference in New Issue