// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/security.h>
#include "trace.h"
#include "trace_probe.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

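/*
 * Check whether the calling task has the privileges required to attach
 * @p_event to @tp_event; raw tracepoint data and the ftrace function
 * event are restricted.
 */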
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	if (tp_event->perf_perm) {
		ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * The parent was already checked when it was created,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		ret = perf_allow_tracepoint(&p_event->attr);
		if (ret)
			return ret;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for the function
		 * trace event, due to issues with page faults while
		 * tracing the page fault handler and its overall tricky
		 * nature.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	ret = perf_allow_tracepoint(&p_event->attr);
	if (ret)
		return ret;

	return 0;
}

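/*
 * Register the event with its trace event class and, on first use,
 * allocate the per-context perf trace buffers shared by all events.
 */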
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

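/*
 * Drop the perf reference on this trace event; the last user of the
 * event unregisters it and frees its per-CPU hlist, and the last perf
 * user overall frees the shared perf trace buffers.
 */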
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

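/*
 * Common setup path shared by tracepoint, kprobe and uprobe perf events:
 * permission check, class/buffer registration, then the OPEN callback.
 */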
static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

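/*
 * Entry point for tracepoint perf events: resolve attr.config to a
 * registered trace event under event_mutex and initialize against it.
 */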
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

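/* Counterpart of perf_trace_init(): close and unregister the event. */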
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

#ifdef CONFIG_KPROBE_EVENTS
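/*
 * Create a local, perf-only kprobe trace event from the attributes in
 * @p_event and bind the perf event to it.
 */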
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */

#ifdef CONFIG_UPROBE_EVENTS
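/*
 * Create a local, perf-only uprobe trace event from the attributes in
 * @p_event and bind the perf event to it.
 */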
int perf_uprobe_init(struct perf_event *p_event,
		     unsigned long ref_ctr_offset, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;

	path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
			    PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		return (ret == -EINVAL) ? -E2BIG : ret;
	}
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
					     ref_ctr_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * A local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable();
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */

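/*
 * pmu::add callback for trace events: queue the event on this CPU's
 * hlist unless the event class provides its own TRACE_REG_PERF_ADD
 * handling.
 */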
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was performed
	 * and we need to take the default action of enqueueing our event on
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

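/*
 * pmu::del callback for trace events: remove the event from this CPU's
 * hlist unless the event class provides its own TRACE_REG_PERF_DEL
 * handling.
 */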
void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was performed
	 * and we need to take the default action of dequeueing our event from
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}

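/*
 * Grab a software event recursion context and hand back the matching
 * per-CPU raw data buffer (and pt_regs slot) for building a record.
 */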
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

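/* Fill in the common trace_entry header of a record built in the perf buffer. */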
void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;

	tracing_generic_entry_update(entry, type, tracing_gen_ctx());
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
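/*
 * ftrace_ops callback used by perf function-trace events: build a
 * TRACE_FN sample for the traced function and submit it to perf.
 */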
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;
	int bit;

	if (!rcu_is_watching())
		return;

	if ((unsigned long)ops->private != smp_processor_id())
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		goto out;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

out:
	ftrace_test_recursion_unlock(bit);
#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->func = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

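/*
 * TRACE_REG_PERF_* dispatch for the ftrace:function trace event; the
 * open/close callbacks (un)register the per-event ftrace_ops.
 */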
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */