Merge tag 'perf-core-for-mingo-5.5-20190925' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

perf record:

  Stephane Eranian:

  - Fix the privilege level with branch sampling for perf_event_paranoid=2:
    the kernel checks if perf_event_attr.exclude_hv is set in addition to
    .exclude_kernel, so reset both to zero.

  Arnaldo Carvalho de Melo:

  - Don't warn about not being able to read kernel maps (kallsyms, etc.) when
    kernel samples aren't being collected.

perf list:

  Kim Phillips:

  - Allow plurals for 'metric' and 'metricgroup', i.e.:

      $ perf list metrics

    was showing nothing, which is very confusing; make it work like:

      $ perf list metric

perf stat:

  Andi Kleen:

  - Fix a free memory access and memory leaks detected via valgrind, related
    to metrics.

Libraries:

  libperf:

    Jiri Olsa:

    - Move more stuff from tools/perf, this time a first stab at moving the
      perf_mmap methods.

  libtraceevent:

    Steven Rostedt (VMware):

    - Round up the time precision in tep_print_event().

    Tzvetomir Stoyanov (VMware):

    - Add man pages for the event print and plugin related APIs.

    - Move the traceevent plugins into their own subdirectory.

Feature detection:

  Thomas Richter:

  - Add detection of the java-11-openjdk-devel package, in addition to the
    older versions already supported.

Architecture specific:

  S/390:

    Thomas Richter:

    - Include JVMTI support for s390.

Vendor events:

  AMD:

    Kim Phillips:

    - Add L3 cache events for Family 17h.

    - Remove a redundant '['.

  PowerPC:

    Mamatha Inamdar:

    - Remove P8 HW events which are not supported.

Cleanups:

  Arnaldo Carvalho de Melo:

  - Remove needless headers, add needed ones, and move things around to
    reduce the header dependency tree, speeding up builds by not doing
    needless compiles when unrelated stuff gets changed.

  - Ditch unused code that was dragging headers along.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit b11f7244ef
@@ -3,6 +3,7 @@
#define _TOOLS_ASM_BUG_H

#include <linux/compiler.h>
#include <stdio.h>

#define __WARN_printf(arg...) do { fprintf(stderr, arg); } while (0)
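The hunk above adds a <stdio.h> include next to __WARN_printf(), which expands to an fprintf(stderr, ...) call. A minimal standalone sketch, not part of the patch, showing why the declaration has to be visible; the macro is copied from the hunk above:

#include <stdio.h>	/* declares fprintf(), which the macro below expands to */

/* Copied from the hunk above; without <stdio.h> in scope, a user of this
 * warning helper would fail to build (or rely on an implicit declaration). */
#define __WARN_printf(arg...)	do { fprintf(stderr, arg); } while (0)

int main(void)
{
	__WARN_printf("example warning: value=%d\n", 42);
	return 0;
}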
@@ -6,14 +6,3 @@ libtraceevent-y += parse-utils.o
libtraceevent-y += kbuffer-parse.o
libtraceevent-y += tep_strerror.o
libtraceevent-y += event-parse-api.o

plugin_jbd2-y += plugin_jbd2.o
plugin_hrtimer-y += plugin_hrtimer.o
plugin_kmem-y += plugin_kmem.o
plugin_kvm-y += plugin_kvm.o
plugin_mac80211-y += plugin_mac80211.o
plugin_sched_switch-y += plugin_sched_switch.o
plugin_function-y += plugin_function.o
plugin_xen-y += plugin_xen.o
plugin_scsi-y += plugin_scsi.o
plugin_cfg80211-y += plugin_cfg80211.o
@@ -0,0 +1,130 @@
libtraceevent(3)
================

NAME
----
tep_print_event - Writes event information into a trace sequence.

SYNOPSIS
--------
[verse]
--
*#include <event-parse.h>*
*#include <trace-seq.h>*

void *tep_print_event*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_, const char pass:[*]_fmt_, _..._)
--

DESCRIPTION
-----------

The _tep_print_event()_ function parses the event information of the given
_record_ and writes it into the trace sequence _s_, according to the format
string _fmt_. The desired information is specified after the format string.
The _fmt_ is printf-like format string, following arguments are supported:
[verse]
--
	TEP_PRINT_PID, "%d" - PID of the event.
	TEP_PRINT_CPU, "%d" - Event CPU.
	TEP_PRINT_COMM, "%s" - Event command string.
	TEP_PRINT_NAME, "%s" - Event name.
	TEP_PRINT_LATENCY, "%s" - Latency of the event. It prints 4 or more
			fields - interrupt state, scheduling state,
			current context, and preemption count.
			Field 1 is the interrupt enabled state:
				d : Interrupts are disabled
				. : Interrupts are enabled
				X : The architecture does not support this
				    information
			Field 2 is the "need resched" state.
				N : The task is set to call the scheduler when
				    possible, as another higher priority task
				    may need to be scheduled in.
				. : The task is not set to call the scheduler.
			Field 3 is the context state.
				. : Normal context
				s : Soft interrupt context
				h : Hard interrupt context
				H : Hard interrupt context which triggered
				    during soft interrupt context.
				z : NMI context
				Z : NMI context which triggered during hard
				    interrupt context
			Field 4 is the preemption count.
				. : The preempt count is zero.
			On preemptible kernels (where the task can be scheduled
			out in arbitrary locations while in kernel context), the
			preempt count, when non zero, will prevent the kernel
			from scheduling out the current task. The preempt count
			number is displayed when it is not zero.
			Depending on the kernel, it may show other fields
			(lock depth, or migration disabled, which are unique to
			specialized kernels).
	TEP_PRINT_TIME, %d - event time stamp. A divisor and precision can be
			specified as part of this format string:
			"%precision.divisord". Example:
			"%3.1000d" - divide the time by 1000 and print the first
			3 digits before the dot. Thus, the time stamp
			"123456000" will be printed as "123.456"
	TEP_PRINT_INFO, "%s" - event information.
	TEP_PRINT_INFO_RAW, "%s" - event information, in raw format.

--
EXAMPLE
-------
[source,c]
--
#include <event-parse.h>
#include <trace-seq.h>
...
struct trace_seq seq;
trace_seq_init(&seq);
struct tep_handle *tep = tep_alloc();
...
void print_my_event(struct tep_record *record)
{
	trace_seq_reset(&seq);
	tep_print_event(tep, s, record, "%16s-%-5d [%03d] %s %6.1000d %s %s",
			TEP_PRINT_COMM, TEP_PRINT_PID, TEP_PRINT_CPU,
			TEP_PRINT_LATENCY, TEP_PRINT_TIME, TEP_PRINT_NAME,
			TEP_PRINT_INFO);
}
...
--

FILES
-----
[verse]
--
*event-parse.h*
	Header file to include in order to have access to the library APIs.
*trace-seq.h*
	Header file to include in order to have access to trace sequences related APIs.
	Trace sequences are used to allow a function to call several other functions
	to create a string of data to use.
*-ltraceevent*
	Linker switch to add when building a program that uses the library.
--

SEE ALSO
--------
_libtraceevent(3)_, _trace-cmd(1)_

AUTHOR
------
[verse]
--
*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
--
REPORTING BUGS
--------------
Report bugs to <linux-trace-devel@vger.kernel.org>

LICENSE
-------
libtraceevent is Free Software licensed under the GNU LGPL 2.1

RESOURCES
---------
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
@@ -3,7 +3,7 @@ libtraceevent(3)

NAME
----
tep_alloc, tep_free,tep_ref, tep_unref,tep_ref_get - Create, destroy, manage
tep_alloc, tep_free,tep_ref, tep_unref,tep_get_ref - Create, destroy, manage
references of trace event parser context.

SYNOPSIS

@@ -16,7 +16,7 @@ struct tep_handle pass:[*]*tep_alloc*(void);
void *tep_free*(struct tep_handle pass:[*]_tep_);
void *tep_ref*(struct tep_handle pass:[*]_tep_);
void *tep_unref*(struct tep_handle pass:[*]_tep_);
int *tep_ref_get*(struct tep_handle pass:[*]_tep_);
int *tep_get_ref*(struct tep_handle pass:[*]_tep_);
--

DESCRIPTION

@@ -57,9 +57,9 @@ EXAMPLE
...
struct tep_handle *tep = tep_alloc();
...
int ref = tep_ref_get(tep);
int ref = tep_get_ref(tep);
tep_ref(tep);
if ( (ref+1) != tep_ref_get(tep)) {
if ( (ref+1) != tep_get_ref(tep)) {
	/* Something wrong happened, the counter is not incremented by 1 */
}
tep_unref(tep);
@@ -0,0 +1,99 @@
libtraceevent(3)
================

NAME
----
tep_load_plugins, tep_unload_plugins - Load / unload traceevent plugins.

SYNOPSIS
--------
[verse]
--
*#include <event-parse.h>*

struct tep_plugin_list pass:[*]*tep_load_plugins*(struct tep_handle pass:[*]_tep_);
void *tep_unload_plugins*(struct tep_plugin_list pass:[*]_plugin_list_, struct tep_handle pass:[*]_tep_);
--

DESCRIPTION
-----------
The _tep_load_plugins()_ function loads all plugins, located in the plugin
directories. The _tep_ argument is trace event parser context.
The plugin directories are :
[verse]
--
	- System's plugin directory, defined at the library compile time. It
	  depends on the library installation prefix and usually is
	  _(install_preffix)/lib/traceevent/plugins_
	- Directory, defined by the environment variable _TRACEEVENT_PLUGIN_DIR_
	- User's plugin directory, located at _~/.local/lib/traceevent/plugins_
--
Loading of plugins can be controlled by the _tep_flags_, using the
_tep_set_flag()_ API:
[verse]
--
	_TEP_DISABLE_SYS_PLUGINS_ - do not load plugins, located in
				    the system's plugin directory.
	_TEP_DISABLE_PLUGINS_ - do not load any plugins.
--
The _tep_set_flag()_ API needs to be called before _tep_load_plugins()_, if
loading of all plugins is not the desired case.

The _tep_unload_plugins()_ function unloads the plugins, previously loaded by
_tep_load_plugins()_. The _tep_ argument is trace event parser context. The
_plugin_list_ is the list of loaded plugins, returned by
the _tep_load_plugins()_ function.

RETURN VALUE
------------
The _tep_load_plugins()_ function returns a list of successfully loaded plugins,
or NULL in case no plugins are loaded.

EXAMPLE
-------
[source,c]
--
#include <event-parse.h>
...
struct tep_handle *tep = tep_alloc();
...
struct tep_plugin_list *plugins = tep_load_plugins(tep);
if (plugins == NULL) {
	/* no plugins are loaded */
}
...
tep_unload_plugins(plugins, tep);
--

FILES
-----
[verse]
--
*event-parse.h*
	Header file to include in order to have access to the library APIs.
*-ltraceevent*
	Linker switch to add when building a program that uses the library.
--

SEE ALSO
--------
_libtraceevent(3)_, _trace-cmd(1)_, _tep_set_flag(3)_

AUTHOR
------
[verse]
--
*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
--
REPORTING BUGS
--------------
Report bugs to <linux-trace-devel@vger.kernel.org>

LICENSE
-------
libtraceevent is Free Software licensed under the GNU LGPL 2.1

RESOURCES
---------
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
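The DESCRIPTION above states that tep_set_flag() has to be called before tep_load_plugins() when loading all plugins is not desired, but the EXAMPLE section does not show that step. A minimal sketch using only the APIs and the TEP_DISABLE_SYS_PLUGINS flag documented in the man page above (error handling omitted):

#include <event-parse.h>

/* Sketch: skip the system-wide plugin directory, but still honor
 * TRACEEVENT_PLUGIN_DIR and the user's ~/.local/lib/traceevent/plugins. */
static struct tep_plugin_list *load_non_system_plugins(struct tep_handle *tep)
{
	/* Must be called before tep_load_plugins(), as described above. */
	tep_set_flag(tep, TEP_DISABLE_SYS_PLUGINS);

	return tep_load_plugins(tep);	/* NULL means no plugins were loaded */
}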
@@ -16,7 +16,7 @@ Management of tep handler data structure and access of its members:
	void *tep_free*(struct tep_handle pass:[*]_tep_);
	void *tep_ref*(struct tep_handle pass:[*]_tep_);
	void *tep_unref*(struct tep_handle pass:[*]_tep_);
	int *tep_ref_get*(struct tep_handle pass:[*]_tep_);
	int *tep_get_ref*(struct tep_handle pass:[*]_tep_);
	void *tep_set_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
	void *tep_clear_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
	bool *tep_test_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flags_);

@@ -26,15 +26,12 @@ Management of tep handler data structure and access of its members:
	void *tep_set_long_size*(struct tep_handle pass:[*]_tep_, int _long_size_);
	int *tep_get_page_size*(struct tep_handle pass:[*]_tep_);
	void *tep_set_page_size*(struct tep_handle pass:[*]_tep_, int _page_size_);
	bool *tep_is_latency_format*(struct tep_handle pass:[*]_tep_);
	void *tep_set_latency_format*(struct tep_handle pass:[*]_tep_, int _lat_);
	int *tep_get_header_page_size*(struct tep_handle pass:[*]_tep_);
	int *tep_get_header_timestamp_size*(struct tep_handle pass:[*]_tep_);
	bool *tep_is_old_format*(struct tep_handle pass:[*]_tep_);
	int *tep_strerror*(struct tep_handle pass:[*]_tep_, enum tep_errno _errnum_, char pass:[*]_buf_, size_t _buflen_);

Register / unregister APIs:
	int *tep_register_trace_clock*(struct tep_handle pass:[*]_tep_, const char pass:[*]_trace_clock_);
	int *tep_register_function*(struct tep_handle pass:[*]_tep_, char pass:[*]_name_, unsigned long long _addr_, char pass:[*]_mod_);
	int *tep_register_event_handler*(struct tep_handle pass:[*]_tep_, int _id_, const char pass:[*]_sys_name_, const char pass:[*]_event_name_, tep_event_handler_func _func_, void pass:[*]_context_);
	int *tep_unregister_event_handler*(struct tep_handle pass:[*]tep, int id, const char pass:[*]sys_name, const char pass:[*]event_name, tep_event_handler_func func, void pass:[*]_context_);

@@ -57,14 +54,7 @@ Event related APIs:
	int *tep_get_events_count*(struct tep_handle pass:[*]_tep_);
	struct tep_event pass:[*]pass:[*]*tep_list_events*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
	struct tep_event pass:[*]pass:[*]*tep_list_events_copy*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);

Event printing:
	void *tep_print_event*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_, bool _use_trace_clock_);
	void *tep_print_event_data*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_);
	void *tep_event_info*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_);
	void *tep_print_event_task*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_);
	void *tep_print_event_time*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]record, bool _use_trace_clock_);
	void *tep_set_print_raw*(struct tep_handle pass:[*]_tep_, int _print_raw_);
	void *tep_print_event*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_, const char pass:[*]_fmt_, _..._);

Event finding:
	struct tep_event pass:[*]*tep_find_event*(struct tep_handle pass:[*]_tep_, int _id_);

@@ -116,7 +106,6 @@ Filter management:
	int *tep_filter_compare*(struct tep_event_filter pass:[*]_filter1_, struct tep_event_filter pass:[*]_filter2_);

Parsing various data from the records:
	void *tep_data_latency_format*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_);
	int *tep_data_type*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
	int *tep_data_pid*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
	int *tep_data_preempt_count*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
@ -58,30 +58,6 @@ export man_dir man_dir_SQ INSTALL
|
|||
export DESTDIR DESTDIR_SQ
|
||||
export EVENT_PARSE_VERSION
|
||||
|
||||
set_plugin_dir := 1
|
||||
|
||||
# Set plugin_dir to preffered global plugin location
|
||||
# If we install under $HOME directory we go under
|
||||
# $(HOME)/.local/lib/traceevent/plugins
|
||||
#
|
||||
# We dont set PLUGIN_DIR in case we install under $HOME
|
||||
# directory, because by default the code looks under:
|
||||
# $(HOME)/.local/lib/traceevent/plugins by default.
|
||||
#
|
||||
ifeq ($(plugin_dir),)
|
||||
ifeq ($(prefix),$(HOME))
|
||||
override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
|
||||
set_plugin_dir := 0
|
||||
else
|
||||
override plugin_dir = $(libdir)/traceevent/plugins
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(set_plugin_dir),1)
|
||||
PLUGIN_DIR = -DPLUGIN_DIR="$(plugin_dir)"
|
||||
PLUGIN_DIR_SQ = '$(subst ','\'',$(PLUGIN_DIR))'
|
||||
endif
|
||||
|
||||
include ../../scripts/Makefile.include
|
||||
|
||||
# copy a bit from Linux kbuild
|
||||
|
@ -105,7 +81,6 @@ export prefix libdir src obj
|
|||
# Shell quotes
|
||||
libdir_SQ = $(subst ','\'',$(libdir))
|
||||
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
|
||||
plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
|
||||
|
||||
CONFIG_INCLUDES =
|
||||
CONFIG_LIBS =
|
||||
|
@ -151,29 +126,14 @@ MAKEOVERRIDES=
|
|||
export srctree OUTPUT CC LD CFLAGS V
|
||||
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
|
||||
|
||||
PLUGINS = plugin_jbd2.so
|
||||
PLUGINS += plugin_hrtimer.so
|
||||
PLUGINS += plugin_kmem.so
|
||||
PLUGINS += plugin_kvm.so
|
||||
PLUGINS += plugin_mac80211.so
|
||||
PLUGINS += plugin_sched_switch.so
|
||||
PLUGINS += plugin_function.so
|
||||
PLUGINS += plugin_xen.so
|
||||
PLUGINS += plugin_scsi.so
|
||||
PLUGINS += plugin_cfg80211.so
|
||||
|
||||
PLUGINS := $(addprefix $(OUTPUT),$(PLUGINS))
|
||||
PLUGINS_IN := $(PLUGINS:.so=-in.o)
|
||||
|
||||
TE_IN := $(OUTPUT)libtraceevent-in.o
|
||||
LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
|
||||
DYNAMIC_LIST_FILE := $(OUTPUT)libtraceevent-dynamic-list
|
||||
|
||||
CMD_TARGETS = $(LIB_TARGET) $(PLUGINS) $(DYNAMIC_LIST_FILE)
|
||||
CMD_TARGETS = $(LIB_TARGET)
|
||||
|
||||
TARGETS = $(CMD_TARGETS)
|
||||
|
||||
all: all_cmd
|
||||
all: all_cmd plugins
|
||||
|
||||
all_cmd: $(CMD_TARGETS)
|
||||
|
||||
|
@ -188,17 +148,6 @@ $(OUTPUT)libtraceevent.so.$(EVENT_PARSE_VERSION): $(TE_IN)
|
|||
$(OUTPUT)libtraceevent.a: $(TE_IN)
|
||||
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
|
||||
|
||||
$(OUTPUT)libtraceevent-dynamic-list: $(PLUGINS)
|
||||
$(QUIET_GEN)$(call do_generate_dynamic_list_file, $(PLUGINS), $@)
|
||||
|
||||
plugins: $(PLUGINS)
|
||||
|
||||
__plugin_obj = $(notdir $@)
|
||||
plugin_obj = $(__plugin_obj:-in.o=)
|
||||
|
||||
$(PLUGINS_IN): force
|
||||
$(Q)$(MAKE) $(build)=$(plugin_obj)
|
||||
|
||||
$(OUTPUT)%.so: $(OUTPUT)%-in.o
|
||||
$(QUIET_LINK)$(CC) $(CFLAGS) -shared $(LDFLAGS) -nostartfiles -o $@ $^
|
||||
|
||||
|
@ -258,25 +207,6 @@ define do_install
|
|||
$(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
|
||||
endef
|
||||
|
||||
define do_install_plugins
|
||||
for plugin in $1; do \
|
||||
$(call do_install,$$plugin,$(plugin_dir_SQ)); \
|
||||
done
|
||||
endef
|
||||
|
||||
define do_generate_dynamic_list_file
|
||||
symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
|
||||
xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
|
||||
if [ "$$symbol_type" = "U W" ];then \
|
||||
(echo '{'; \
|
||||
$(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
|
||||
echo '};'; \
|
||||
) > $2; \
|
||||
else \
|
||||
(echo Either missing one of [$1] or bad version of $(NM)) 1>&2;\
|
||||
fi
|
||||
endef
|
||||
|
||||
PKG_CONFIG_FILE = libtraceevent.pc
|
||||
define do_install_pkgconfig_file
|
||||
if [ -n "${pkgconfig_dir}" ]; then \
|
||||
|
@ -296,10 +226,6 @@ install_lib: all_cmd install_plugins install_headers install_pkgconfig
|
|||
$(call do_install_mkdir,$(libdir_SQ)); \
|
||||
cp -fpR $(LIB_INSTALL) $(DESTDIR)$(libdir_SQ)
|
||||
|
||||
install_plugins: $(PLUGINS)
|
||||
$(call QUIET_INSTALL, trace_plugins) \
|
||||
$(call do_install_plugins, $(PLUGINS))
|
||||
|
||||
install_pkgconfig:
|
||||
$(call QUIET_INSTALL, $(PKG_CONFIG_FILE)) \
|
||||
$(call do_install_pkgconfig_file,$(prefix))
|
||||
|
@ -313,7 +239,7 @@ install_headers:
|
|||
|
||||
install: install_lib
|
||||
|
||||
clean:
|
||||
clean: clean_plugins
|
||||
$(call QUIET_CLEAN, libtraceevent) \
|
||||
$(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd; \
|
||||
$(RM) TRACEEVENT-CFLAGS tags TAGS; \
|
||||
|
@ -351,7 +277,19 @@ help:
|
|||
@echo ' doc-install - install the man pages'
|
||||
@echo ' doc-uninstall - uninstall the man pages'
|
||||
@echo''
|
||||
PHONY += force plugins
|
||||
|
||||
PHONY += plugins
|
||||
plugins:
|
||||
$(call descend,plugins)
|
||||
|
||||
PHONY += install_plugins
|
||||
install_plugins:
|
||||
$(call descend,plugins,install)
|
||||
|
||||
PHONY += clean_plugins
|
||||
clean_plugins:
|
||||
$(call descend,plugins,clean)
|
||||
|
||||
force:
|
||||
|
||||
# Declare the contents of the .PHONY variable as phony. We keep that
|
||||
|
|
|
@@ -5527,8 +5527,10 @@ static void print_event_time(struct tep_handle *tep, struct trace_seq *s,
	if (divstr && isdigit(*(divstr + 1)))
		div = atoi(divstr + 1);
	time = record->ts;
	if (div)
	if (div) {
		time += div / 2;
		time /= div;
	}
	pr = prec;
	while (pr--)
		p10 *= 10;
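The hunk above makes print_event_time() round to the nearest unit instead of truncating: adding div / 2 before the division is the usual integer round-half-up idiom. A small standalone illustration of the arithmetic, with made-up values (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long long ts = 123456789;	/* timestamp, e.g. in nanoseconds */
	unsigned long long div = 1000;		/* print with a 1000 divisor */

	unsigned long long truncated = ts / div;		/* old behavior: 123456 */
	unsigned long long rounded = (ts + div / 2) / div;	/* new behavior: 123457 */

	printf("truncated=%llu rounded=%llu\n", truncated, rounded);
	return 0;
}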
@@ -441,6 +441,8 @@ int tep_register_print_string(struct tep_handle *tep, const char *fmt,
			      unsigned long long addr);
bool tep_is_pid_registered(struct tep_handle *tep, int pid);

struct tep_event *tep_get_event(struct tep_handle *tep, int index);

#define TEP_PRINT_INFO "INFO"
#define TEP_PRINT_INFO_RAW "INFO_RAW"
#define TEP_PRINT_COMM "COMM"
@@ -0,0 +1,10 @@
plugin_jbd2-y += plugin_jbd2.o
plugin_hrtimer-y += plugin_hrtimer.o
plugin_kmem-y += plugin_kmem.o
plugin_kvm-y += plugin_kvm.o
plugin_mac80211-y += plugin_mac80211.o
plugin_sched_switch-y += plugin_sched_switch.o
plugin_function-y += plugin_function.o
plugin_xen-y += plugin_xen.o
plugin_scsi-y += plugin_scsi.o
plugin_cfg80211-y += plugin_cfg80211.o
@ -0,0 +1,222 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#MAKEFLAGS += --no-print-directory
|
||||
|
||||
|
||||
# Makefiles suck: This macro sets a default value of $(2) for the
|
||||
# variable named by $(1), unless the variable has been set by
|
||||
# environment or command line. This is necessary for CC and AR
|
||||
# because make sets default values, so the simpler ?= approach
|
||||
# won't work as expected.
|
||||
define allow-override
|
||||
$(if $(or $(findstring environment,$(origin $(1))),\
|
||||
$(findstring command line,$(origin $(1)))),,\
|
||||
$(eval $(1) = $(2)))
|
||||
endef
|
||||
|
||||
# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
|
||||
$(call allow-override,CC,$(CROSS_COMPILE)gcc)
|
||||
$(call allow-override,AR,$(CROSS_COMPILE)ar)
|
||||
$(call allow-override,NM,$(CROSS_COMPILE)nm)
|
||||
$(call allow-override,PKG_CONFIG,pkg-config)
|
||||
|
||||
EXT = -std=gnu99
|
||||
INSTALL = install
|
||||
|
||||
# Use DESTDIR for installing into a different root directory.
|
||||
# This is useful for building a package. The program will be
|
||||
# installed in this directory as if it was the root directory.
|
||||
# Then the build tool can move it later.
|
||||
DESTDIR ?=
|
||||
DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
|
||||
|
||||
LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
|
||||
ifeq ($(LP64), 1)
|
||||
libdir_relative = lib64
|
||||
else
|
||||
libdir_relative = lib
|
||||
endif
|
||||
|
||||
prefix ?= /usr/local
|
||||
libdir = $(prefix)/$(libdir_relative)
|
||||
|
||||
set_plugin_dir := 1
|
||||
|
||||
# Set plugin_dir to preffered global plugin location
|
||||
# If we install under $HOME directory we go under
|
||||
# $(HOME)/.local/lib/traceevent/plugins
|
||||
#
|
||||
# We dont set PLUGIN_DIR in case we install under $HOME
|
||||
# directory, because by default the code looks under:
|
||||
# $(HOME)/.local/lib/traceevent/plugins by default.
|
||||
#
|
||||
ifeq ($(plugin_dir),)
|
||||
ifeq ($(prefix),$(HOME))
|
||||
override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
|
||||
set_plugin_dir := 0
|
||||
else
|
||||
override plugin_dir = $(libdir)/traceevent/plugins
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(set_plugin_dir),1)
|
||||
PLUGIN_DIR = -DPLUGIN_DIR="$(plugin_dir)"
|
||||
PLUGIN_DIR_SQ = '$(subst ','\'',$(PLUGIN_DIR))'
|
||||
endif
|
||||
|
||||
include ../../../scripts/Makefile.include
|
||||
|
||||
# copy a bit from Linux kbuild
|
||||
|
||||
ifeq ("$(origin V)", "command line")
|
||||
VERBOSE = $(V)
|
||||
endif
|
||||
ifndef VERBOSE
|
||||
VERBOSE = 0
|
||||
endif
|
||||
|
||||
ifeq ($(srctree),)
|
||||
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
|
||||
srctree := $(patsubst %/,%,$(dir $(srctree)))
|
||||
srctree := $(patsubst %/,%,$(dir $(srctree)))
|
||||
srctree := $(patsubst %/,%,$(dir $(srctree)))
|
||||
#$(info Determined 'srctree' to be $(srctree))
|
||||
endif
|
||||
|
||||
export prefix libdir src obj
|
||||
|
||||
# Shell quotes
|
||||
plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
|
||||
|
||||
CONFIG_INCLUDES =
|
||||
CONFIG_LIBS =
|
||||
CONFIG_FLAGS =
|
||||
|
||||
OBJ = $@
|
||||
N =
|
||||
|
||||
INCLUDES = -I. -I.. -I $(srctree)/tools/include $(CONFIG_INCLUDES)
|
||||
|
||||
# Set compile option CFLAGS
|
||||
ifdef EXTRA_CFLAGS
|
||||
CFLAGS := $(EXTRA_CFLAGS)
|
||||
else
|
||||
CFLAGS := -g -Wall
|
||||
endif
|
||||
|
||||
# Append required CFLAGS
|
||||
override CFLAGS += -fPIC
|
||||
override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
|
||||
override CFLAGS += $(udis86-flags) -D_GNU_SOURCE
|
||||
|
||||
ifeq ($(VERBOSE),1)
|
||||
Q =
|
||||
else
|
||||
Q = @
|
||||
endif
|
||||
|
||||
# Disable command line variables (CFLAGS) override from top
|
||||
# level Makefile (perf), otherwise build Makefile will get
|
||||
# the same command line setup.
|
||||
MAKEOVERRIDES=
|
||||
|
||||
export srctree OUTPUT CC LD CFLAGS V
|
||||
|
||||
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
|
||||
|
||||
DYNAMIC_LIST_FILE := $(OUTPUT)libtraceevent-dynamic-list
|
||||
|
||||
PLUGINS = plugin_jbd2.so
|
||||
PLUGINS += plugin_hrtimer.so
|
||||
PLUGINS += plugin_kmem.so
|
||||
PLUGINS += plugin_kvm.so
|
||||
PLUGINS += plugin_mac80211.so
|
||||
PLUGINS += plugin_sched_switch.so
|
||||
PLUGINS += plugin_function.so
|
||||
PLUGINS += plugin_xen.so
|
||||
PLUGINS += plugin_scsi.so
|
||||
PLUGINS += plugin_cfg80211.so
|
||||
|
||||
PLUGINS := $(addprefix $(OUTPUT),$(PLUGINS))
|
||||
PLUGINS_IN := $(PLUGINS:.so=-in.o)
|
||||
|
||||
plugins: $(PLUGINS) $(DYNAMIC_LIST_FILE)
|
||||
|
||||
__plugin_obj = $(notdir $@)
|
||||
plugin_obj = $(__plugin_obj:-in.o=)
|
||||
|
||||
$(PLUGINS_IN): force
|
||||
$(Q)$(MAKE) $(build)=$(plugin_obj)
|
||||
|
||||
$(OUTPUT)libtraceevent-dynamic-list: $(PLUGINS)
|
||||
$(QUIET_GEN)$(call do_generate_dynamic_list_file, $(PLUGINS), $@)
|
||||
|
||||
$(OUTPUT)%.so: $(OUTPUT)%-in.o
|
||||
$(QUIET_LINK)$(CC) $(CFLAGS) -shared $(LDFLAGS) -nostartfiles -o $@ $^
|
||||
|
||||
define update_dir
|
||||
(echo $1 > $@.tmp; \
|
||||
if [ -r $@ ] && cmp -s $@ $@.tmp; then \
|
||||
rm -f $@.tmp; \
|
||||
else \
|
||||
echo ' UPDATE $@'; \
|
||||
mv -f $@.tmp $@; \
|
||||
fi);
|
||||
endef
|
||||
|
||||
tags: force
|
||||
$(RM) tags
|
||||
find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
|
||||
--regex-c++='/_PE\(([^,)]*).*/TEP_ERRNO__\1/'
|
||||
|
||||
TAGS: force
|
||||
$(RM) TAGS
|
||||
find . -name '*.[ch]' | xargs etags \
|
||||
--regex='/_PE(\([^,)]*\).*/TEP_ERRNO__\1/'
|
||||
|
||||
define do_install_mkdir
|
||||
if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
|
||||
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
|
||||
fi
|
||||
endef
|
||||
|
||||
define do_install
|
||||
$(call do_install_mkdir,$2); \
|
||||
$(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
|
||||
endef
|
||||
|
||||
define do_install_plugins
|
||||
for plugin in $1; do \
|
||||
$(call do_install,$$plugin,$(plugin_dir_SQ)); \
|
||||
done
|
||||
endef
|
||||
|
||||
define do_generate_dynamic_list_file
|
||||
symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
|
||||
xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
|
||||
if [ "$$symbol_type" = "U W" ];then \
|
||||
(echo '{'; \
|
||||
$(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
|
||||
echo '};'; \
|
||||
) > $2; \
|
||||
else \
|
||||
(echo Either missing one of [$1] or bad version of $(NM)) 1>&2;\
|
||||
fi
|
||||
endef
|
||||
|
||||
install: $(PLUGINS)
|
||||
$(call QUIET_INSTALL, trace_plugins) \
|
||||
$(call do_install_plugins, $(PLUGINS))
|
||||
|
||||
clean:
|
||||
$(call QUIET_CLEAN, trace_plugins) \
|
||||
$(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd; \
|
||||
$(RM) $(OUTPUT)libtraceevent-dynamic-list \
|
||||
$(RM) TRACEEVENT-CFLAGS tags TAGS;
|
||||
|
||||
PHONY += force plugins
|
||||
force:
|
||||
|
||||
# Declare the contents of the .PHONY variable as phony. We keep that
|
||||
# information in a variable so we can use it in if_changed and friends.
|
||||
.PHONY: $(PHONY)
|
|
@@ -924,7 +924,7 @@ ifndef NO_JVMTI
    JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
  else
    ifneq (,$(wildcard /usr/sbin/alternatives))
      JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
      JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed -e 's%/jre/bin/java.%%g' -e 's%/bin/java.%%g')
    endif
  endif
  ifndef JDIR
@@ -292,7 +292,7 @@ endif
LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
export LIBTRACEEVENT

LIBTRACEEVENT_DYNAMIC_LIST = $(TE_PATH)libtraceevent-dynamic-list
LIBTRACEEVENT_DYNAMIC_LIST = $(TE_PATH)plugins/libtraceevent-dynamic-list

#
# The static build has no dynsym table, so this does not work for

@@ -737,7 +737,7 @@ libtraceevent_plugins: FORCE
	$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins

$(LIBTRACEEVENT_DYNAMIC_LIST): libtraceevent_plugins
	$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent-dynamic-list
	$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)plugins/libtraceevent-dynamic-list

$(LIBTRACEEVENT)-clean:
	$(call QUIET_CLEAN, libtraceevent)
@ -23,9 +23,10 @@
|
|||
#include "../../util/event.h"
|
||||
#include "../../util/evlist.h"
|
||||
#include "../../util/evsel.h"
|
||||
#include "../../util/evsel_config.h"
|
||||
#include "../../util/pmu.h"
|
||||
#include "../../util/cs-etm.h"
|
||||
#include "../../util/util.h" // page_size
|
||||
#include <internal/lib.h> // page_size
|
||||
#include "../../util/session.h"
|
||||
|
||||
#include <errno.h>
|
||||
|
@ -416,7 +417,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
|
|||
if (err)
|
||||
goto out;
|
||||
|
||||
tracking_evsel = perf_evlist__last(evlist);
|
||||
tracking_evsel = evlist__last(evlist);
|
||||
perf_evlist__set_tracking_event(evlist, tracking_evsel);
|
||||
|
||||
tracking_evsel->core.attr.freq = 0;
|
||||
|
@ -648,7 +649,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
|
|||
if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
|
||||
return -EINVAL;
|
||||
|
||||
if (!session->evlist->nr_mmaps)
|
||||
if (!session->evlist->core.nr_mmaps)
|
||||
return -EINVAL;
|
||||
|
||||
/* If the cpu_map is empty all online CPUs are involved */
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
#include "../../util/evsel.h"
|
||||
#include "../../util/evlist.h"
|
||||
#include "../../util/session.h"
|
||||
#include "../../util/util.h" // page_size
|
||||
#include <internal/lib.h> // page_size
|
||||
#include "../../util/pmu.h"
|
||||
#include "../../util/debug.h"
|
||||
#include "../../util/auxtrace.h"
|
||||
|
@ -51,7 +51,7 @@ static int arm_spe_info_fill(struct auxtrace_record *itr,
|
|||
if (priv_size != ARM_SPE_AUXTRACE_PRIV_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
if (!session->evlist->nr_mmaps)
|
||||
if (!session->evlist->core.nr_mmaps)
|
||||
return -EINVAL;
|
||||
|
||||
auxtrace_info->type = PERF_AUXTRACE_ARM_SPE;
|
||||
|
@ -129,7 +129,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
tracking_evsel = perf_evlist__last(evlist);
|
||||
tracking_evsel = evlist__last(evlist);
|
||||
perf_evlist__set_tracking_event(evlist, tracking_evsel);
|
||||
|
||||
tracking_evsel->core.attr.freq = 0;
|
||||
|
|
|
@ -4,6 +4,7 @@ PERF_HAVE_DWARF_REGS := 1
|
|||
endif
|
||||
HAVE_KVM_STAT_SUPPORT := 1
|
||||
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
|
||||
PERF_HAVE_JITDUMP := 1
|
||||
|
||||
#
|
||||
# Syscall table generation for perf
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
#include <stdbool.h>
|
||||
#include <stdlib.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/bitops.h>
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
#include <unistd.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include "util.h" // page_size
|
||||
#include <internal/lib.h> // page_size
|
||||
#include "machine.h"
|
||||
#include "api/fs/fs.h"
|
||||
#include "debug.h"
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
#include "evlist.h"
|
||||
#include "evsel.h"
|
||||
#include "arch-tests.h"
|
||||
#include <internal/lib.h> // page_size
|
||||
|
||||
#include <signal.h>
|
||||
#include <sys/mman.h>
|
||||
|
@ -62,9 +63,9 @@ int test__intel_cqm_count_nmi_context(struct test *test __maybe_unused, int subt
|
|||
goto out;
|
||||
}
|
||||
|
||||
evsel = perf_evlist__first(evlist);
|
||||
evsel = evlist__first(evlist);
|
||||
if (!evsel) {
|
||||
pr_debug("perf_evlist__first failed\n");
|
||||
pr_debug("evlist__first failed\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include "thread_map.h"
|
||||
#include "record.h"
|
||||
#include "tsc.h"
|
||||
#include "util/mmap.h"
|
||||
#include "tests/tests.h"
|
||||
|
||||
#include "arch-tests.h"
|
||||
|
@ -65,7 +66,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
|
|||
union perf_event *event;
|
||||
u64 test_tsc, comm1_tsc, comm2_tsc;
|
||||
u64 test_time, comm1_time = 0, comm2_time = 0;
|
||||
struct perf_mmap *md;
|
||||
struct mmap *md;
|
||||
|
||||
threads = thread_map__new(-1, getpid(), UINT_MAX);
|
||||
CHECK_NOT_NULL__(threads);
|
||||
|
@ -82,7 +83,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
|
|||
|
||||
perf_evlist__config(evlist, &opts, NULL);
|
||||
|
||||
evsel = perf_evlist__first(evlist);
|
||||
evsel = evlist__first(evlist);
|
||||
|
||||
evsel->core.attr.comm = 1;
|
||||
evsel->core.attr.disabled = 1;
|
||||
|
@ -90,9 +91,9 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
|
|||
|
||||
CHECK__(evlist__open(evlist));
|
||||
|
||||
CHECK__(perf_evlist__mmap(evlist, UINT_MAX));
|
||||
CHECK__(evlist__mmap(evlist, UINT_MAX));
|
||||
|
||||
pc = evlist->mmap[0].base;
|
||||
pc = evlist->mmap[0].core.base;
|
||||
ret = perf_read_tsc_conversion(pc, &tc);
|
||||
if (ret) {
|
||||
if (ret == -EOPNOTSUPP) {
|
||||
|
@ -114,7 +115,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
|
|||
|
||||
evlist__disable(evlist);
|
||||
|
||||
for (i = 0; i < evlist->nr_mmaps; i++) {
|
||||
for (i = 0; i < evlist->core.nr_mmaps; i++) {
|
||||
md = &evlist->mmap[i];
|
||||
if (perf_mmap__read_init(md) < 0)
|
||||
continue;
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
#include "tests/tests.h"
|
||||
#include "cloexec.h"
|
||||
#include "event.h"
|
||||
#include "util.h" // page_size
|
||||
#include <internal/lib.h> // page_size
|
||||
#include "arch-tests.h"
|
||||
|
||||
static u64 rdpmc(unsigned int counter)
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include "../../util/event.h"
|
||||
#include "../../util/evsel.h"
|
||||
#include "../../util/evlist.h"
|
||||
#include "../../util/mmap.h"
|
||||
#include "../../util/session.h"
|
||||
#include "../../util/pmu.h"
|
||||
#include "../../util/debug.h"
|
||||
|
@ -22,7 +23,7 @@
|
|||
#include "../../util/tsc.h"
|
||||
#include "../../util/auxtrace.h"
|
||||
#include "../../util/intel-bts.h"
|
||||
#include "../../util/util.h" // page_size
|
||||
#include <internal/lib.h> // page_size
|
||||
|
||||
#define KiB(x) ((x) * 1024)
|
||||
#define MiB(x) ((x) * 1024 * 1024)
|
||||
|
@ -74,10 +75,10 @@ static int intel_bts_info_fill(struct auxtrace_record *itr,
|
|||
if (priv_size != INTEL_BTS_AUXTRACE_PRIV_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
if (!session->evlist->nr_mmaps)
|
||||
if (!session->evlist->core.nr_mmaps)
|
||||
return -EINVAL;
|
||||
|
||||
pc = session->evlist->mmap[0].base;
|
||||
pc = session->evlist->mmap[0].core.base;
|
||||
if (pc) {
|
||||
err = perf_read_tsc_conversion(pc, &tc);
|
||||
if (err) {
|
||||
|
@ -230,7 +231,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
tracking_evsel = perf_evlist__last(evlist);
|
||||
tracking_evsel = evlist__last(evlist);
|
||||
|
||||
perf_evlist__set_tracking_event(evlist, tracking_evsel);
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include "../../util/evlist.h"
|
||||
#include "../../util/evsel.h"
|
||||
#include "../../util/cpumap.h"
|
||||
#include "../../util/mmap.h"
|
||||
#include <subcmd/parse-options.h>
|
||||
#include "../../util/parse-events.h"
|
||||
#include "../../util/pmu.h"
|
||||
|
@ -26,7 +27,7 @@
|
|||
#include "../../util/record.h"
|
||||
#include "../../util/target.h"
|
||||
#include "../../util/tsc.h"
|
||||
#include "../../util/util.h" // page_size
|
||||
#include <internal/lib.h> // page_size
|
||||
#include "../../util/intel-pt.h"
|
||||
|
||||
#define KiB(x) ((x) * 1024)
|
||||
|
@ -351,10 +352,10 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
|
|||
filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
|
||||
filter_str_len = filter ? strlen(filter) : 0;
|
||||
|
||||
if (!session->evlist->nr_mmaps)
|
||||
if (!session->evlist->core.nr_mmaps)
|
||||
return -EINVAL;
|
||||
|
||||
pc = session->evlist->mmap[0].base;
|
||||
pc = session->evlist->mmap[0].core.base;
|
||||
if (pc) {
|
||||
err = perf_read_tsc_conversion(pc, &tc);
|
||||
if (err) {
|
||||
|
@ -416,12 +417,12 @@ static int intel_pt_track_switches(struct evlist *evlist)
|
|||
return err;
|
||||
}
|
||||
|
||||
evsel = perf_evlist__last(evlist);
|
||||
evsel = evlist__last(evlist);
|
||||
|
||||
perf_evsel__set_sample_bit(evsel, CPU);
|
||||
perf_evsel__set_sample_bit(evsel, TIME);
|
||||
|
||||
evsel->system_wide = true;
|
||||
evsel->core.system_wide = true;
|
||||
evsel->no_aux_samples = true;
|
||||
evsel->immediate = true;
|
||||
|
||||
|
@ -716,13 +717,13 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
switch_evsel = perf_evlist__last(evlist);
|
||||
switch_evsel = evlist__last(evlist);
|
||||
|
||||
switch_evsel->core.attr.freq = 0;
|
||||
switch_evsel->core.attr.sample_period = 1;
|
||||
switch_evsel->core.attr.context_switch = 1;
|
||||
|
||||
switch_evsel->system_wide = true;
|
||||
switch_evsel->core.system_wide = true;
|
||||
switch_evsel->no_aux_samples = true;
|
||||
switch_evsel->immediate = true;
|
||||
|
||||
|
@ -774,7 +775,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
tracking_evsel = perf_evlist__last(evlist);
|
||||
tracking_evsel = evlist__last(evlist);
|
||||
|
||||
perf_evlist__set_tracking_event(evlist, tracking_evsel);
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
#include <limits.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "../../util/util.h" // page_size
|
||||
#include <internal/lib.h> // page_size
|
||||
#include "../../util/machine.h"
|
||||
#include "../../util/map.h"
|
||||
#include "../../util/symbol.h"
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include "perf.h"
|
||||
#include "util/evlist.h"
|
||||
#include "util/evsel.h"
|
||||
#include "util/evsel_fprintf.h"
|
||||
#include "util/parse-events.h"
|
||||
#include <subcmd/parse-options.h>
|
||||
#include "util/session.h"
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
#include "util/build-id.h"
|
||||
#include "util/evsel.h"
|
||||
#include "util/evlist.h"
|
||||
#include "util/mmap.h"
|
||||
#include "util/term.h"
|
||||
#include "util/symbol.h"
|
||||
#include "util/thread.h"
|
||||
|
@ -750,7 +751,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
|
|||
{
|
||||
struct evlist *evlist = kvm->evlist;
|
||||
union perf_event *event;
|
||||
struct perf_mmap *md;
|
||||
struct mmap *md;
|
||||
u64 timestamp;
|
||||
s64 n = 0;
|
||||
int err;
|
||||
|
@ -801,7 +802,7 @@ static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
|
|||
s64 n, ntotal = 0;
|
||||
u64 flush_time = ULLONG_MAX, mmap_time;
|
||||
|
||||
for (i = 0; i < kvm->evlist->nr_mmaps; i++) {
|
||||
for (i = 0; i < kvm->evlist->core.nr_mmaps; i++) {
|
||||
n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time);
|
||||
if (n < 0)
|
||||
return -1;
|
||||
|
@ -966,10 +967,10 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
|
|||
goto out;
|
||||
}
|
||||
|
||||
if (perf_evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0)
|
||||
if (evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0)
|
||||
goto out;
|
||||
|
||||
nr_stdin = perf_evlist__add_pollfd(kvm->evlist, fileno(stdin));
|
||||
nr_stdin = evlist__add_pollfd(kvm->evlist, fileno(stdin));
|
||||
if (nr_stdin < 0)
|
||||
goto out;
|
||||
|
||||
|
@ -980,7 +981,7 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
|
|||
evlist__enable(kvm->evlist);
|
||||
|
||||
while (!done) {
|
||||
struct fdarray *fda = &kvm->evlist->pollfd;
|
||||
struct fdarray *fda = &kvm->evlist->core.pollfd;
|
||||
int rc;
|
||||
|
||||
rc = perf_kvm__mmap_read(kvm);
|
||||
|
@ -1060,7 +1061,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
|
|||
goto out;
|
||||
}
|
||||
|
||||
if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
|
||||
if (evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
|
||||
ui__error("Failed to mmap the events: %s\n",
|
||||
str_error_r(errno, sbuf, sizeof(sbuf)));
|
||||
evlist__close(evlist);
|
||||
|
|
|
@@ -81,9 +81,9 @@ int cmd_list(int argc, const char **argv)
				long_desc_flag, details_flag);
		else if (strcmp(argv[i], "sdt") == 0)
			print_sdt_events(NULL, NULL, raw_dump);
		else if (strcmp(argv[i], "metric") == 0)
		else if (strcmp(argv[i], "metric") == 0 || strcmp(argv[i], "metrics") == 0)
			metricgroup__print(true, false, NULL, raw_dump, details_flag);
		else if (strcmp(argv[i], "metricgroup") == 0)
		else if (strcmp(argv[i], "metricgroup") == 0 || strcmp(argv[i], "metricgroups") == 0)
			metricgroup__print(false, true, NULL, raw_dump, details_flag);
		else if ((sep = strchr(argv[i], ':')) != NULL) {
			int sep_idx;
@ -20,6 +20,7 @@
|
|||
#include "util/evlist.h"
|
||||
#include "util/evsel.h"
|
||||
#include "util/debug.h"
|
||||
#include "util/mmap.h"
|
||||
#include "util/target.h"
|
||||
#include "util/session.h"
|
||||
#include "util/tool.h"
|
||||
|
@ -119,7 +120,7 @@ static bool switch_output_time(struct record *rec)
|
|||
trigger_is_ready(&switch_output_trigger);
|
||||
}
|
||||
|
||||
static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
|
||||
static int record__write(struct record *rec, struct mmap *map __maybe_unused,
|
||||
void *bf, size_t size)
|
||||
{
|
||||
struct perf_data_file *file = &rec->session->data->file;
|
||||
|
@ -168,7 +169,7 @@ static int record__aio_write(struct aiocb *cblock, int trace_fd,
|
|||
return rc;
|
||||
}
|
||||
|
||||
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
|
||||
static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
|
||||
{
|
||||
void *rem_buf;
|
||||
off_t rem_off;
|
||||
|
@ -214,7 +215,7 @@ static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
|
|||
return rc;
|
||||
}
|
||||
|
||||
static int record__aio_sync(struct perf_mmap *md, bool sync_all)
|
||||
static int record__aio_sync(struct mmap *md, bool sync_all)
|
||||
{
|
||||
struct aiocb **aiocb = md->aio.aiocb;
|
||||
struct aiocb *cblocks = md->aio.cblocks;
|
||||
|
@ -255,12 +256,12 @@ struct record_aio {
|
|||
size_t size;
|
||||
};
|
||||
|
||||
static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
|
||||
static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
|
||||
{
|
||||
struct record_aio *aio = to;
|
||||
|
||||
/*
|
||||
* map->base data pointed by buf is copied into free map->aio.data[] buffer
|
||||
* map->core.base data pointed by buf is copied into free map->aio.data[] buffer
|
||||
* to release space in the kernel buffer as fast as possible, calling
|
||||
* perf_mmap__consume() from perf_mmap__push() function.
|
||||
*
|
||||
|
@ -300,7 +301,7 @@ static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t
|
|||
return size;
|
||||
}
|
||||
|
||||
static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
|
||||
static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
|
||||
{
|
||||
int ret, idx;
|
||||
int trace_fd = rec->session->data->file.fd;
|
||||
|
@ -351,15 +352,15 @@ static void record__aio_mmap_read_sync(struct record *rec)
|
|||
{
|
||||
int i;
|
||||
struct evlist *evlist = rec->evlist;
|
||||
struct perf_mmap *maps = evlist->mmap;
|
||||
struct mmap *maps = evlist->mmap;
|
||||
|
||||
if (!record__aio_enabled(rec))
|
||||
return;
|
||||
|
||||
for (i = 0; i < evlist->nr_mmaps; i++) {
|
||||
struct perf_mmap *map = &maps[i];
|
||||
for (i = 0; i < evlist->core.nr_mmaps; i++) {
|
||||
struct mmap *map = &maps[i];
|
||||
|
||||
if (map->base)
|
||||
if (map->core.base)
|
||||
record__aio_sync(map, true);
|
||||
}
|
||||
}
|
||||
|
@ -387,7 +388,7 @@ static int record__aio_parse(const struct option *opt,
|
|||
#else /* HAVE_AIO_SUPPORT */
|
||||
static int nr_cblocks_max = 0;
|
||||
|
||||
static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
|
||||
static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
|
||||
off_t *off __maybe_unused)
|
||||
{
|
||||
return -1;
|
||||
|
@ -439,7 +440,7 @@ static int record__mmap_flush_parse(const struct option *opt,
|
|||
if (!opts->mmap_flush)
|
||||
opts->mmap_flush = MMAP_FLUSH_DEFAULT;
|
||||
|
||||
flush_max = perf_evlist__mmap_size(opts->mmap_pages);
|
||||
flush_max = evlist__mmap_size(opts->mmap_pages);
|
||||
flush_max /= 4;
|
||||
if (opts->mmap_flush > flush_max)
|
||||
opts->mmap_flush = flush_max;
|
||||
|
@ -482,7 +483,7 @@ static int process_synthesized_event(struct perf_tool *tool,
|
|||
return record__write(rec, NULL, event, event->header.size);
|
||||
}
|
||||
|
||||
static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
|
||||
static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
|
||||
{
|
||||
struct record *rec = to;
|
||||
|
||||
|
@ -527,7 +528,7 @@ static void record__sig_exit(void)
|
|||
#ifdef HAVE_AUXTRACE_SUPPORT
|
||||
|
||||
static int record__process_auxtrace(struct perf_tool *tool,
|
||||
struct perf_mmap *map,
|
||||
struct mmap *map,
|
||||
union perf_event *event, void *data1,
|
||||
size_t len1, void *data2, size_t len2)
|
||||
{
|
||||
|
@ -565,7 +566,7 @@ static int record__process_auxtrace(struct perf_tool *tool,
|
|||
}
|
||||
|
||||
static int record__auxtrace_mmap_read(struct record *rec,
|
||||
struct perf_mmap *map)
|
||||
struct mmap *map)
|
||||
{
|
||||
int ret;
|
||||
|
||||
|
@ -581,7 +582,7 @@ static int record__auxtrace_mmap_read(struct record *rec,
|
|||
}
|
||||
|
||||
static int record__auxtrace_mmap_read_snapshot(struct record *rec,
|
||||
struct perf_mmap *map)
|
||||
struct mmap *map)
|
||||
{
|
||||
int ret;
|
||||
|
||||
|
@ -602,8 +603,8 @@ static int record__auxtrace_read_snapshot_all(struct record *rec)
|
|||
int i;
|
||||
int rc = 0;
|
||||
|
||||
for (i = 0; i < rec->evlist->nr_mmaps; i++) {
|
||||
struct perf_mmap *map = &rec->evlist->mmap[i];
|
||||
for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
|
||||
struct mmap *map = &rec->evlist->mmap[i];
|
||||
|
||||
if (!map->auxtrace_mmap.base)
|
||||
continue;
|
||||
|
@ -668,7 +669,7 @@ static int record__auxtrace_init(struct record *rec)
|
|||
|
||||
static inline
|
||||
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
|
||||
struct perf_mmap *map __maybe_unused)
|
||||
struct mmap *map __maybe_unused)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
@ -707,7 +708,7 @@ static int record__mmap_evlist(struct record *rec,
|
|||
if (opts->affinity != PERF_AFFINITY_SYS)
|
||||
cpu__setup_cpunode_map();
|
||||
|
||||
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
|
||||
if (evlist__mmap_ex(evlist, opts->mmap_pages,
|
||||
opts->auxtrace_mmap_pages,
|
||||
opts->auxtrace_snapshot_mode,
|
||||
opts->nr_cblocks, opts->affinity,
|
||||
|
@ -755,9 +756,9 @@ static int record__open(struct record *rec)
|
|||
if (perf_evlist__add_dummy(evlist))
|
||||
return -ENOMEM;
|
||||
|
||||
pos = perf_evlist__first(evlist);
|
||||
pos = evlist__first(evlist);
|
||||
pos->tracking = 0;
|
||||
pos = perf_evlist__last(evlist);
|
||||
pos = evlist__last(evlist);
|
||||
pos->tracking = 1;
|
||||
pos->core.attr.enable_on_exec = 1;
|
||||
}
|
||||
|
@ -788,6 +789,17 @@ try_again:
|
|||
pos->supported = true;
|
||||
}
|
||||
|
||||
if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(evlist)) {
|
||||
pr_warning(
|
||||
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
|
||||
"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
|
||||
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
|
||||
"file is not found in the buildid cache or in the vmlinux path.\n\n"
|
||||
"Samples in kernel modules won't be resolved at all.\n\n"
|
||||
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
|
||||
"even with a suitable vmlinux or kallsyms file.\n\n");
|
||||
}
|
||||
|
||||
if (perf_evlist__apply_filters(evlist, &pos)) {
|
||||
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
|
||||
pos->filter, perf_evsel__name(pos), errno,
|
||||
|
@ -890,7 +902,7 @@ static struct perf_event_header finished_round_event = {
|
|||
.type = PERF_RECORD_FINISHED_ROUND,
|
||||
};
|
||||
|
||||
static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
|
||||
static void record__adjust_affinity(struct record *rec, struct mmap *map)
{
if (rec->opts.affinity != PERF_AFFINITY_SYS &&
!CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {

@@ -937,7 +949,7 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
u64 bytes_written = rec->bytes_written;
int i;
int rc = 0;
struct perf_mmap *maps;
struct mmap *maps;
int trace_fd = rec->data.file.fd;
off_t off = 0;

@@ -954,20 +966,20 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
if (record__aio_enabled(rec))
off = record__aio_get_pos(trace_fd);

for (i = 0; i < evlist->nr_mmaps; i++) {
for (i = 0; i < evlist->core.nr_mmaps; i++) {
u64 flush = 0;
struct perf_mmap *map = &maps[i];
struct mmap *map = &maps[i];

if (map->base) {
if (map->core.base) {
record__adjust_affinity(rec, map);
if (synch) {
flush = map->flush;
map->flush = 1;
flush = map->core.flush;
map->core.flush = 1;
}
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) < 0) {
if (synch)
map->flush = flush;
map->core.flush = flush;
rc = -1;
goto out;
}

@@ -975,13 +987,13 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
if (record__aio_push(rec, map, &off) < 0) {
record__aio_set_pos(trace_fd, off);
if (synch)
map->flush = flush;
map->core.flush = flush;
rc = -1;
goto out;
}
}
if (synch)
map->flush = flush;
map->core.flush = flush;
}

if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&

@@ -1186,10 +1198,10 @@ static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct evlist *evlist)
{
if (evlist) {
if (evlist->mmap && evlist->mmap[0].base)
return evlist->mmap[0].base;
if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
return evlist->overwrite_mmap[0].base;
if (evlist->mmap && evlist->mmap[0].core.base)
return evlist->mmap[0].core.base;
if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
return evlist->overwrite_mmap[0].core.base;
}
return NULL;
}

@@ -1400,7 +1412,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
err = -1;
goto out_child;
}
session->header.env.comp_mmap_len = session->evlist->mmap_len;
session->header.env.comp_mmap_len = session->evlist->core.mmap_len;

err = bpf__apply_obj_config();
if (err) {

@@ -1603,7 +1615,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (hits == rec->samples) {
if (done || draining)
break;
err = perf_evlist__poll(rec->evlist, -1);
err = evlist__poll(rec->evlist, -1);
/*
* Propagate error, only if there's any. Ignore positive
* number of returned events and interrupt error.

@@ -1612,7 +1624,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
err = 0;
waking++;

if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
draining = true;
}

@@ -1969,7 +1981,7 @@ out_free:

static void switch_output_size_warn(struct record *rec)
{
u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
struct switch_output *s = &rec->switch_output;

wakeup_size /= 2;

@@ -2364,16 +2376,6 @@ int cmd_record(int argc, const char **argv)

err = -ENOMEM;

if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

if (rec->no_buildid_cache || rec->no_buildid) {
disable_buildid_cache();
} else if (rec->switch_output.enabled) {

@@ -6,6 +6,7 @@
#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

@@ -2055,7 +2056,7 @@ static void timehist_print_sample(struct perf_sched *sched,
EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
EVSEL__PRINT_CALLCHAIN_ARROW |
EVSEL__PRINT_SKIP_IGNORED,
&callchain_cursor, stdout);
&callchain_cursor, symbol_conf.bt_stop_list, stdout);

out:
printf("\n");

@@ -17,6 +17,7 @@
#include "util/trace-event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/evswitch.h"
#include "util/sort.h"
#include "util/data.h"

@@ -1325,7 +1326,8 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
} else
printed += fprintf(fp, "\n");

printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor, fp);
printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor,
symbol_conf.bt_stop_list, fp);
}

/* print branch_to information */

@@ -1867,7 +1869,8 @@ static void process_event(struct perf_script *script,
cursor = &callchain_cursor;

fputc(cursor ? '\n' : ' ', fp);
sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, fp);
sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor,
symbol_conf.bt_stop_list, fp);
}

if (PRINT_FIELD(IREGS))

@@ -1916,7 +1919,7 @@ static void __process_stat(struct evsel *counter, u64 tstamp)
int cpu, thread;
static int header_printed;

if (counter->system_wide)
if (counter->core.system_wide)
nthreads = 1;

if (!header_printed) {

@@ -2043,7 +2046,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
return err;

evlist = *pevlist;
evsel = perf_evlist__last(*pevlist);
evsel = evlist__last(*pevlist);

if (!evsel->priv) {
if (scr->per_event_dump) {

@@ -235,7 +235,7 @@ static int write_stat_round_event(u64 tm, u64 type)
#define WRITE_STAT_ROUND_EVENT(time, interval) \
write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

static int
perf_evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread,

@@ -278,7 +278,7 @@ static int read_counter(struct evsel *counter, struct timespec *rs)
if (!counter->supported)
return -ENOENT;

if (counter->system_wide)
if (counter->core.system_wide)
nthreads = 1;

for (thread = 0; thread < nthreads; thread++) {

@@ -1671,7 +1671,7 @@ static void setup_system_wide(int forks)
struct evsel *counter;

evlist__for_each_entry(evsel_list, counter) {
if (!counter->system_wide)
if (!counter->core.system_wide)
return;
}

@@ -27,9 +27,11 @@
#include "util/dso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/mmap.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"

@@ -530,7 +532,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
prompt_integer(&counter, "Enter details event counter");

if (counter >= top->evlist->core.nr_entries) {
top->sym_evsel = perf_evlist__first(top->evlist);
top->sym_evsel = evlist__first(top->evlist);
fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
sleep(1);
break;

@@ -539,7 +541,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
if (top->sym_evsel->idx == counter)
break;
} else
top->sym_evsel = perf_evlist__first(top->evlist);
top->sym_evsel = evlist__first(top->evlist);
break;
case 'f':
prompt_integer(&top->count_filter, "Enter display event count filter");

@@ -863,7 +865,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
struct record_opts *opts = &top->record_opts;
struct evlist *evlist = top->evlist;
struct perf_mmap *md;
struct mmap *md;
union perf_event *event;

md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];

@@ -903,7 +905,7 @@ static void perf_top__mmap_read(struct perf_top *top)
if (overwrite)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);

for (i = 0; i < top->evlist->nr_mmaps; i++)
for (i = 0; i < top->evlist->core.nr_mmaps; i++)
perf_top__mmap_read_idx(top, i);

if (overwrite) {

@@ -961,7 +963,7 @@ static int perf_top__overwrite_check(struct perf_top *top)
/* has term for current event */
if ((overwrite < 0) && (set >= 0)) {
/* if it's first event, set overwrite */
if (evsel == perf_evlist__first(evlist))
if (evsel == evlist__first(evlist))
overwrite = set;
else
return -1;

@@ -985,7 +987,7 @@ static int perf_top_overwrite_fallback(struct perf_top *top,
return 0;

/* only fall back when first event fails */
if (evsel != perf_evlist__first(evlist))
if (evsel != evlist__first(evlist))
return 0;

evlist__for_each_entry(evlist, counter)

@@ -1042,7 +1044,7 @@ try_again:
}
}

if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) {
if (evlist__mmap(evlist, opts->mmap_pages) < 0) {
ui__error("Failed to mmap with %d (%s)\n",
errno, str_error_r(errno, msg, sizeof(msg)));
goto out_err;

@@ -1306,7 +1308,7 @@ static int __cmd_top(struct perf_top *top)
}

/* Wait for a minimal set of events before starting the snapshot */
perf_evlist__poll(top->evlist, 100);
evlist__poll(top->evlist, 100);

perf_top__mmap_read(top);

@@ -1316,7 +1318,7 @@ static int __cmd_top(struct perf_top *top)
perf_top__mmap_read(top);

if (opts->overwrite || (hits == top->samples))
ret = perf_evlist__poll(top->evlist, 100);
ret = evlist__poll(top->evlist, 100);

if (resize) {
perf_top__resize(top);

@@ -1643,7 +1645,7 @@ int cmd_top(int argc, const char **argv)
goto out_delete_evlist;
}

top.sym_evsel = perf_evlist__first(top.evlist);
top.sym_evsel = evlist__first(top.evlist);

if (!callchain_param.enabled) {
symbol_conf.cumulate_callchain = false;

@@ -28,9 +28,12 @@
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"

@@ -2075,7 +2078,7 @@ static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sam
EVSEL__PRINT_DSO |
EVSEL__PRINT_UNKNOWN_AS_ADDR;

return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
}

static const char *errno_to_name(struct evsel *evsel, int err)

@@ -3409,7 +3412,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (trace->dump.map)
bpf_map__fprintf(trace->dump.map, trace->output);

err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
err = evlist__mmap(evlist, trace->opts.mmap_pages);
if (err < 0)
goto out_error_mmap;

@@ -3426,7 +3429,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)

trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
evlist->core.threads->nr > 1 ||
perf_evlist__first(evlist)->core.attr.inherit;
evlist__first(evlist)->core.attr.inherit;

/*
* Now that we already used evsel->core.attr to ask the kernel to setup the

@@ -3442,9 +3445,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
again:
before = trace->nr_events;

for (i = 0; i < evlist->nr_mmaps; i++) {
for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
struct perf_mmap *md;
struct mmap *md;

md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)

@@ -3473,8 +3476,8 @@ again:
if (trace->nr_events == before) {
int timeout = done ? 100 : -1;

if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
if (!draining && evlist__poll(evlist, timeout) > 0) {
if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
draining = true;

goto again;

@@ -59,7 +59,13 @@ else
CFLAGS := -g -Wall
endif

INCLUDES = -I$(srctree)/tools/perf/lib/include -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/ -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
INCLUDES = \
-I$(srctree)/tools/perf/lib/include \
-I$(srctree)/tools/lib/ \
-I$(srctree)/tools/include \
-I$(srctree)/tools/arch/$(SRCARCH)/include/ \
-I$(srctree)/tools/arch/$(SRCARCH)/include/uapi \
-I$(srctree)/tools/include/uapi

# Append required CFLAGS
override CFLAGS += $(EXTRA_WARNINGS)

@@ -88,13 +94,34 @@ LIBPERF_PC := $(OUTPUT)libperf.pc

LIBPERF_ALL := $(LIBPERF_A) $(OUTPUT)libperf.so*

LIB_DIR := $(srctree)/tools/lib/api/

ifneq ($(OUTPUT),)
ifneq ($(subdir),)
API_PATH=$(OUTPUT)/../lib/api/
else
API_PATH=$(OUTPUT)
endif
else
API_PATH=$(LIB_DIR)
endif

LIBAPI = $(API_PATH)libapi.a

$(LIBAPI): FORCE
$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a

$(LIBAPI)-clean:
$(call QUIET_CLEAN, libapi)
$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null

$(LIBPERF_IN): FORCE
$(Q)$(MAKE) $(build)=libperf

$(LIBPERF_A): $(LIBPERF_IN)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN)

$(LIBPERF_SO): $(LIBPERF_IN)
$(LIBPERF_SO): $(LIBPERF_IN) $(LIBAPI)
$(QUIET_LINK)$(CC) --shared -Wl,-soname,libperf.so \
-Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
@ln -sf $(@F) $(OUTPUT)libperf.so

@@ -106,12 +133,12 @@ libs: $(LIBPERF_A) $(LIBPERF_SO) $(LIBPERF_PC)
all: fixdep
$(Q)$(MAKE) libs

clean:
clean: $(LIBAPI)-clean
$(call QUIET_CLEAN, libperf) $(RM) $(LIBPERF_A) \
*.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd LIBPERF-CFLAGS $(LIBPERF_PC)
$(Q)$(MAKE) -C tests clean

tests:
tests: libs
$(Q)$(MAKE) -C tests
$(Q)$(MAKE) -C tests run

@@ -4,7 +4,9 @@

#include <stdio.h>
#include <stdarg.h>
#include <unistd.h>
#include <perf/core.h>
#include <internal/lib.h>
#include "internal.h"

static int __base_pr(enum libperf_print_level level, const char *format,

@@ -15,11 +17,6 @@ static int __base_pr(enum libperf_print_level level, const char *format,

static libperf_print_fn_t __libperf_pr = __base_pr;

void libperf_set_print(libperf_print_fn_t fn)
{
__libperf_pr = fn;
}

__printf(2, 3)
void libperf_print(enum libperf_print_level level, const char *format, ...)
{

@@ -32,3 +29,9 @@ void libperf_print(enum libperf_print_level level, const char *format, ...)
__libperf_pr(level, format, args);
va_end(args);
}

void libperf_init(libperf_print_fn_t fn)
{
page_size = sysconf(_SC_PAGE_SIZE);
__libperf_pr = fn;
}

@@ -1,16 +1,30 @@
// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>

void perf_evlist__init(struct perf_evlist *evlist)
{
int i;

for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
INIT_HLIST_HEAD(&evlist->heads[i]);
INIT_LIST_HEAD(&evlist->entries);
evlist->nr_entries = 0;
}

@@ -157,3 +171,113 @@ void perf_evlist__disable(struct perf_evlist *evlist)
perf_evlist__for_each_entry(evlist, evsel)
perf_evsel__disable(evsel);
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
struct perf_evsel *first = perf_evlist__first(evlist);

return first->attr.read_format;
}

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
struct perf_evsel *evsel,
int cpu, int thread, u64 id)
{
int hash;
struct perf_sample_id *sid = SID(evsel, cpu, thread);

sid->id = id;
sid->evsel = evsel;
hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
struct perf_evsel *evsel,
int cpu, int thread, u64 id)
{
perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
struct perf_evsel *evsel,
int cpu, int thread, int fd)
{
u64 read_data[4] = { 0, };
int id_idx = 1; /* The first entry is the counter value */
u64 id;
int ret;

ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
if (!ret)
goto add;

if (errno != ENOTTY)
return -1;

/* Legacy way to get event id.. All hail to old kernels! */

/*
* This way does not work with group format read, so bail
* out in that case.
*/
if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
return -1;

if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
read(fd, &read_data, sizeof(read_data)) == -1)
return -1;

if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
++id_idx;
if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
++id_idx;

id = read_data[id_idx];

add:
perf_evlist__id_add(evlist, evsel, cpu, thread, id);
return 0;
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
int nr_cpus = perf_cpu_map__nr(evlist->cpus);
int nr_threads = perf_thread_map__nr(evlist->threads);
int nfds = 0;
struct perf_evsel *evsel;

perf_evlist__for_each_entry(evlist, evsel) {
if (evsel->system_wide)
nfds += nr_cpus;
else
nfds += nr_cpus * nr_threads;
}

if (fdarray__available_entries(&evlist->pollfd) < nfds &&
fdarray__grow(&evlist->pollfd, nfds) < 0)
return -ENOMEM;

return 0;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
void *ptr, short revent)
{
int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);

if (pos >= 0) {
evlist->pollfd.priv[pos].ptr = ptr;
fcntl(fd, F_SETFL, O_NONBLOCK);
}

return pos;
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
return fdarray__poll(&evlist->pollfd, timeout);
}

@@ -230,3 +230,33 @@ struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
return &evsel->attr;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
if (ncpus == 0 || nthreads == 0)
return 0;

if (evsel->system_wide)
nthreads = 1;

evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
if (evsel->sample_id == NULL)
return -ENOMEM;

evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
if (evsel->id == NULL) {
xyarray__delete(evsel->sample_id);
evsel->sample_id = NULL;
return -ENOMEM;
}

return 0;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
xyarray__delete(evsel->sample_id);
evsel->sample_id = NULL;
zfree(&evsel->id);
evsel->ids = 0;
}

@@ -3,6 +3,11 @@
#define __LIBPERF_INTERNAL_EVLIST_H

#include <linux/list.h>
#include <api/fd/array.h>
#include <internal/evsel.h>

#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)

struct perf_cpu_map;
struct perf_thread_map;

@@ -13,8 +18,16 @@ struct perf_evlist {
bool has_user_cpus;
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
int nr_mmaps;
size_t mmap_len;
struct fdarray pollfd;
struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
};

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
void *ptr, short revent);

/**
* __perf_evlist__for_each_entry - iterate thru all the evsels
* @list: list_head instance to iterate

@@ -47,4 +60,24 @@ struct perf_evlist {
#define perf_evlist__for_each_entry_reverse(evlist, evsel) \
__perf_evlist__for_each_entry_reverse(&(evlist)->entries, evsel)

static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
{
return list_entry(evlist->entries.next, struct perf_evsel, node);
}

static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
{
return list_entry(evlist->entries.prev, struct perf_evsel, node);
}

u64 perf_evlist__read_format(struct perf_evlist *evlist);

void perf_evlist__id_add(struct perf_evlist *evlist,
struct perf_evsel *evsel,
int cpu, int thread, u64 id);

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
struct perf_evsel *evsel,
int cpu, int thread, int fd);

#endif /* __LIBPERF_INTERNAL_EVLIST_H */

@@ -4,9 +4,35 @@

#include <linux/types.h>
#include <linux/perf_event.h>
#include <stdbool.h>
#include <sys/types.h>

struct perf_cpu_map;
struct perf_thread_map;
struct xyarray;

/*
* Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
* more than one entry in the evlist.
*/
struct perf_sample_id {
struct hlist_node node;
u64 id;
struct perf_evsel *evsel;
/*
* 'idx' will be used for AUX area sampling. A sample will have AUX area
* data that will be queued for decoding, where there are separate
* queues for each CPU (per-cpu tracing) or task (per-thread tracing).
* The sample ID can be used to lookup 'idx' which is effectively the
* queue number.
*/
int idx;
int cpu;
pid_t tid;

/* Holds total ID period value for PERF_SAMPLE_READ processing. */
u64 period;
};

struct perf_evsel {
struct list_head node;

@@ -15,9 +41,13 @@ struct perf_evsel {
struct perf_cpu_map *own_cpus;
struct perf_thread_map *threads;
struct xyarray *fd;
struct xyarray *sample_id;
u64 *id;
u32 ids;

/* parse modifier helper */
int nr_members;
bool system_wide;
};

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

@@ -26,4 +56,7 @@ void perf_evsel__free_fd(struct perf_evsel *evsel);
int perf_evsel__read_size(struct perf_evsel *evsel);
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter);

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__free_id(struct perf_evsel *evsel);

#endif /* __LIBPERF_INTERNAL_EVSEL_H */

@@ -2,7 +2,9 @@
#ifndef __LIBPERF_INTERNAL_LIB_H
#define __LIBPERF_INTERNAL_LIB_H

#include <unistd.h>
#include <sys/types.h>

extern unsigned int page_size;

ssize_t readn(int fd, void *buf, size_t n);
ssize_t writen(int fd, const void *buf, size_t n);

@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIBPERF_INTERNAL_MMAP_H
#define __LIBPERF_INTERNAL_MMAP_H

#include <linux/compiler.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <stdbool.h>

/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)

/**
* struct perf_mmap - perf's ring buffer mmap details
*
* @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
*/
struct perf_mmap {
void *base;
int mask;
int fd;
int cpu;
refcount_t refcnt;
u64 prev;
u64 start;
u64 end;
bool overwrite;
u64 flush;
char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
};

#endif /* __LIBPERF_INTERNAL_MMAP_H */

@@ -17,6 +17,6 @@ enum libperf_print_level {
typedef int (*libperf_print_fn_t)(enum libperf_print_level level,
const char *, va_list ap);

LIBPERF_API void libperf_set_print(libperf_print_fn_t fn);
LIBPERF_API void libperf_init(libperf_print_fn_t fn);

#endif /* __LIBPERF_CORE_H */

@@ -31,5 +31,6 @@ LIBPERF_API void perf_evlist__disable(struct perf_evlist *evlist);
LIBPERF_API void perf_evlist__set_maps(struct perf_evlist *evlist,
struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
LIBPERF_API int perf_evlist__poll(struct perf_evlist *evlist, int timeout);

#endif /* __LIBPERF_EVLIST_H */

@@ -5,6 +5,8 @@
#include <linux/kernel.h>
#include <internal/lib.h>

unsigned int page_size;

static ssize_t ion(bool is_read, int fd, void *buf, size_t n)
{
void *buf_start = buf;

@@ -1,6 +1,6 @@
LIBPERF_0.0.1 {
global:
libperf_set_print;
libperf_init;
perf_cpu_map__dummy_new;
perf_cpu_map__get;
perf_cpu_map__put;

@@ -39,6 +39,7 @@ LIBPERF_0.0.1 {
perf_evlist__remove;
perf_evlist__next;
perf_evlist__set_maps;
perf_evlist__poll;
local:
*;
};

@@ -1,13 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdarg.h>
#include <stdio.h>
#include <perf/cpumap.h>
#include <internal/tests.h>

static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
{
return vfprintf(stderr, fmt, ap);
}

int main(int argc, char **argv)
{
struct perf_cpu_map *cpus;

__T_START;

libperf_init(libperf_print);

cpus = perf_cpu_map__dummy_new();
if (!cpus)
return -1;

@@ -1,4 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdarg.h>
#include <linux/perf_event.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>

@@ -6,6 +8,12 @@
#include <perf/evsel.h>
#include <internal/tests.h>

static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
{
return vfprintf(stderr, fmt, ap);
}

static int test_stat_cpu(void)
{
struct perf_cpu_map *cpus;

@@ -177,6 +185,8 @@ int main(int argc, char **argv)
{
__T_START;

libperf_init(libperf_print);

test_stat_cpu();
test_stat_thread();
test_stat_thread_enable();

@@ -1,10 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdarg.h>
#include <stdio.h>
#include <linux/perf_event.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <perf/evsel.h>
#include <internal/tests.h>

static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
{
return vfprintf(stderr, fmt, ap);
}

static int test_stat_cpu(void)
{
struct perf_cpu_map *cpus;

@@ -116,6 +124,8 @@ int main(int argc, char **argv)
{
__T_START;

libperf_init(libperf_print);

test_stat_cpu();
test_stat_thread();
test_stat_thread_enable();

@@ -1,13 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdarg.h>
#include <stdio.h>
#include <perf/threadmap.h>
#include <internal/tests.h>

static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
{
return vfprintf(stderr, fmt, ap);
}

int main(int argc, char **argv)
{
struct perf_thread_map *threads;

__T_START;

libperf_init(libperf_print);

threads = perf_thread_map__new_dummy();
if (!threads)
return -1;

@@ -12,6 +12,7 @@
#include "util/build-id.h"
#include "util/cache.h"
#include "util/env.h"
#include <internal/lib.h> // page_size
#include <subcmd/exec-cmd.h>
#include "util/config.h"
#include <subcmd/run-command.h>

@@ -20,11 +21,12 @@
#include "util/bpf-loader.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/util.h" // page_size, usage()
#include "util/util.h" // usage()
#include "ui/ui.h"
#include "perf-sys.h"
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <perf/core.h>
#include <errno.h>
#include <pthread.h>
#include <signal.h>

@@ -428,6 +430,12 @@ void pthread__unblock_sigwinch(void)
pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
{
return eprintf(level, verbose, fmt, ap);
}

int main(int argc, const char **argv)
{
int err;

@@ -438,8 +446,7 @@ int main(int argc, const char **argv)
exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
pager_init(PERF_PAGER_ENVIRONMENT);

/* The page_size is placed in util object. */
page_size = sysconf(_SC_PAGE_SIZE);
libperf_init(libperf_print);

cmd = extract_argv0_path(argv[0]);
if (!cmd)

@@ -30,9 +30,9 @@ the topic. Eg: "Floating-point.json".
All the topic JSON files for a CPU model/family should be in a separate
sub directory. Thus for the Silvermont X86 CPU:

$ ls tools/perf/pmu-events/arch/x86/Silvermont_core
Cache.json Memory.json Virtual-Memory.json
Frontend.json Pipeline.json
$ ls tools/perf/pmu-events/arch/x86/silvermont
cache.json memory.json virtual-memory.json
frontend.json pipeline.json

The JSONs folder for a CPU model/family may be placed in the root arch
folder, or may be placed in a vendor sub-folder under the arch folder

@@ -94,7 +94,7 @@ users to specify events by their name:

where 'pm_1plus_ppc_cmpl' is a Power8 PMU event.

However some errors in processing may cause the perf build to fail.
However some errors in processing may cause the alias build to fail.

Mapfile format
===============

@@ -119,7 +119,7 @@ where:

Header line
The header line is the first line in the file, which is
always _IGNORED_. It can empty.
always _IGNORED_. It can be empty.

CPUID:
CPUID is an arch-specific char string, that can be used

@@ -138,15 +138,15 @@ where:
files, relative to the directory containing the mapfile.csv

Type:
indicates whether the events or "core" or "uncore" events.
indicates whether the events are "core" or "uncore" events.


Eg:

$ grep Silvermont tools/perf/pmu-events/arch/x86/mapfile.csv
GenuineIntel-6-37,V13,Silvermont_core,core
GenuineIntel-6-4D,V13,Silvermont_core,core
GenuineIntel-6-4C,V13,Silvermont_core,core
$ grep silvermont tools/perf/pmu-events/arch/x86/mapfile.csv
GenuineIntel-6-37,v13,silvermont,core
GenuineIntel-6-4D,v13,silvermont,core
GenuineIntel-6-4C,v13,silvermont,core

i.e the three CPU models use the JSON files (i.e PMU events) listed
in the directory 'tools/perf/pmu-events/arch/x86/Silvermont_core'.
in the directory 'tools/perf/pmu-events/arch/x86/silvermont'.

@@ -1775,30 +1775,6 @@
"BriefDescription": "L3 Load Prefetches",
"PublicDescription": ""
},
{,
"EventCode": "0xa29084",
"EventName": "PM_L3_P0_GRP_PUMP",
"BriefDescription": "L3 pf sent with grp scope port 0",
"PublicDescription": ""
},
{,
"EventCode": "0x528084",
"EventName": "PM_L3_P0_LCO_DATA",
"BriefDescription": "lco sent with data port 0",
"PublicDescription": ""
},
{,
"EventCode": "0x518080",
"EventName": "PM_L3_P0_LCO_NO_DATA",
"BriefDescription": "dataless l3 lco sent port 0",
"PublicDescription": ""
},
{,
"EventCode": "0xa4908c",
"EventName": "PM_L3_P0_LCO_RTY",
"BriefDescription": "L3 LCO received retry port 0",
"PublicDescription": ""
},
{,
"EventCode": "0x84908d",
"EventName": "PM_L3_PF0_ALLOC",

@@ -283,5 +283,47 @@
"BriefDescription": "Total cycles spent with one or more fill requests in flight from L2.",
"PublicDescription": "Total cycles spent with one or more fill requests in flight from L2.",
"UMask": "0x1"
},
{
"EventName": "l3_request_g1.caching_l3_cache_accesses",
"EventCode": "0x01",
"BriefDescription": "Caching: L3 cache accesses",
"UMask": "0x80",
"Unit": "L3PMC"
},
{
"EventName": "l3_lookup_state.all_l3_req_typs",
"EventCode": "0x04",
"BriefDescription": "All L3 Request Types",
"UMask": "0xff",
"Unit": "L3PMC"
},
{
"EventName": "l3_comb_clstr_state.other_l3_miss_typs",
"EventCode": "0x06",
"BriefDescription": "Other L3 Miss Request Types",
"UMask": "0xfe",
"Unit": "L3PMC"
},
{
"EventName": "l3_comb_clstr_state.request_miss",
"EventCode": "0x06",
"BriefDescription": "L3 cache misses",
"UMask": "0x01",
"Unit": "L3PMC"
},
{
"EventName": "xi_sys_fill_latency",
"EventCode": "0x90",
"BriefDescription": "L3 Cache Miss Latency. Total cycles for all transactions divided by 16. Ignores SliceMask and ThreadMask.",
"UMask": "0x00",
"Unit": "L3PMC"
},
{
"EventName": "xi_ccx_sdp_req1.all_l3_miss_req_typs",
"EventCode": "0x9a",
"BriefDescription": "All L3 Miss Request Types. Ignores SliceMask and ThreadMask.",
"UMask": "0x3f",
"Unit": "L3PMC"
}
]

@@ -13,7 +13,7 @@
{
"EventName": "ex_ret_brn",
"EventCode": "0xc2",
"BriefDescription": "[Retired Branch Instructions.",
"BriefDescription": "Retired Branch Instructions.",
"PublicDescription": "The number of branch instructions retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
},
{

@@ -239,6 +239,7 @@ static struct map {
{ "hisi_sccl,ddrc", "hisi_sccl,ddrc" },
{ "hisi_sccl,hha", "hisi_sccl,hha" },
{ "hisi_sccl,l3c", "hisi_sccl,l3c" },
{ "L3PMC", "amd_l3" },
{}
};

@@ -10,6 +10,7 @@
#include "tests.h"
#include "debug.h"
#include "parse-events.h"
#include "util/mmap.h"
#include <errno.h>
#include <linux/string.h>

@@ -32,8 +33,8 @@ static int count_samples(struct evlist *evlist, int *sample_count,
{
int i;

for (i = 0; i < evlist->nr_mmaps; i++) {
struct perf_mmap *map = &evlist->overwrite_mmap[i];
for (i = 0; i < evlist->core.nr_mmaps; i++) {
struct mmap *map = &evlist->overwrite_mmap[i];
union perf_event *event;

perf_mmap__read_init(map);

@@ -63,9 +64,9 @@ static int do_test(struct evlist *evlist, int mmap_pages,
int err;
char sbuf[STRERR_BUFSIZE];

err = perf_evlist__mmap(evlist, mmap_pages);
err = evlist__mmap(evlist, mmap_pages);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
return TEST_FAIL;
}

@@ -75,7 +76,7 @@ static int do_test(struct evlist *evlist, int mmap_pages,
evlist__disable(evlist);

err = count_samples(evlist, sample_count, comm_count);
perf_evlist__munmap(evlist);
evlist__munmap(evlist);
return err;
}

@@ -19,6 +19,7 @@
#include "llvm.h"
#include "debug.h"
#include "parse-events.h"
#include "util/mmap.h"
#define NR_ITERS 111
#define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test"

@@ -167,9 +168,9 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
goto out_delete_evlist;
}

err = perf_evlist__mmap(evlist, opts.mmap_pages);
err = evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}

@@ -178,9 +179,9 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
(*func)();
evlist__disable(evlist);

for (i = 0; i < evlist->nr_mmaps; i++) {
for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
struct perf_mmap *md;
struct mmap *md;

md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)

@@ -24,6 +24,7 @@
#include "symbol.h"
#include "event.h"
#include "record.h"
#include "util/mmap.h"
#include "util/synthetic-events.h"
#include "thread.h"

@@ -419,10 +420,10 @@ static int process_events(struct machine *machine, struct evlist *evlist,
struct state *state)
{
union perf_event *event;
struct perf_mmap *md;
struct mmap *md;
int i, ret;

for (i = 0; i < evlist->nr_mmaps; i++) {
for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
continue;

@@ -651,7 +652,7 @@ static int do_test_code_reading(bool try_kcore)

perf_evlist__config(evlist, &opts, NULL);

evsel = perf_evlist__first(evlist);
evsel = evlist__first(evlist);

evsel->core.attr.comm = 1;
evsel->core.attr.disabled = 1;

@@ -685,9 +686,9 @@ static int do_test_code_reading(bool try_kcore)
break;
}

ret = perf_evlist__mmap(evlist, UINT_MAX);
ret = evlist__mmap(evlist, UINT_MAX);
if (ret < 0) {
pr_debug("perf_evlist__mmap failed\n");
pr_debug("evlist__mmap failed\n");
goto out_put;
}

@@ -16,7 +16,7 @@

static int attach__enable_on_exec(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__last(evlist);
struct evsel *evsel = evlist__last(evlist);
struct target target = {
.uid = UINT_MAX,
};

@@ -58,7 +58,7 @@ static int detach__enable_on_exec(struct evlist *evlist)

static int attach__current_disabled(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__last(evlist);
struct evsel *evsel = evlist__last(evlist);
struct perf_thread_map *threads;
int err;

@@ -84,7 +84,7 @@ static int attach__current_disabled(struct evlist *evlist)

static int attach__current_enabled(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__last(evlist);
struct evsel *evsel = evlist__last(evlist);
struct perf_thread_map *threads;
int err;

@@ -104,14 +104,14 @@ static int attach__current_enabled(struct evlist *evlist)

static int detach__disable(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__last(evlist);
struct evsel *evsel = evlist__last(evlist);

return evsel__enable(evsel);
}

static int attach__cpu_disabled(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__last(evlist);
struct evsel *evsel = evlist__last(evlist);
struct perf_cpu_map *cpus;
int err;

@@ -140,7 +140,7 @@ static int attach__cpu_disabled(struct evlist *evlist)

static int attach__cpu_enabled(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__last(evlist);
struct evsel *evsel = evlist__last(evlist);
struct perf_cpu_map *cpus;
int err;

@@ -180,7 +180,7 @@ static int test_times(int (attach)(struct evlist *),
goto out_err;
}

evsel = perf_evlist__last(evlist);
evsel = evlist__last(evlist);
evsel->core.attr.read_format |=
PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;

@@ -92,12 +92,12 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
evlist = perf_evlist__new_default();
TEST_ASSERT_VAL("failed to get evlist", evlist);

evsel = perf_evlist__first(evlist);
evsel = evlist__first(evlist);

TEST_ASSERT_VAL("failed to allocate ids",
!perf_evsel__alloc_id(evsel, 1, 1));
!perf_evsel__alloc_id(&evsel->core, 1, 1));

perf_evlist__id_add(evlist, evsel, 0, 0, 123);
perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123);

evsel->unit = strdup("KRAVA");

@@ -34,7 +34,7 @@ static int perf_evsel__roundtrip_cache_name_test(void)
}

idx = 0;
evsel = perf_evlist__first(evlist);
evsel = evlist__first(evlist);

for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {

@@ -721,7 +721,7 @@ int test__hists_cumulate(struct test *test __maybe_unused, int subtest __maybe_u
if (verbose > 1)
machine__fprintf(machine, stderr);

evsel = perf_evlist__first(evlist);
evsel = evlist__first(evlist);

for (i = 0; i < ARRAY_SIZE(testcases); i++) {
err = testcases[i](evsel, machine);

@@ -8,6 +8,7 @@
#include "machine.h"
#include "parse-events.h"
#include "hists_common.h"
#include "util/mmap.h"
#include <errno.h>
#include <linux/kernel.h>

@@ -310,8 +311,8 @@ int test__hists_link(struct test *test __maybe_unused, int subtest __maybe_unuse
print_hists_in(hists);
}

first = perf_evlist__first(evlist);
evsel = perf_evlist__last(evlist);
first = evlist__first(evlist);
evsel = evlist__last(evlist);

first_hists = evsel__hists(first);
hists = evsel__hists(evsel);

@@ -608,7 +608,7 @@ int test__hists_output(struct test *test __maybe_unused, int subtest __maybe_unu
if (verbose > 1)
machine__fprintf(machine, stderr);

evsel = perf_evlist__first(evlist);
evsel = evlist__first(evlist);

for (i = 0; i < ARRAY_SIZE(testcases); i++) {
err = testcases[i](evsel, machine);

@@ -13,6 +13,7 @@
#include "record.h"
#include "thread_map.h"
#include "tests.h"
#include "util/mmap.h"

#define CHECK__(x) { \
while ((x) < 0) { \

@@ -31,11 +32,11 @@
static int find_comm(struct evlist *evlist, const char *comm)
{
union perf_event *event;
struct perf_mmap *md;
struct mmap *md;
int i, found;

found = 0;
for (i = 0; i < evlist->nr_mmaps; i++) {
for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
continue;

@@ -92,7 +93,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un

perf_evlist__config(evlist, &opts, NULL);

evsel = perf_evlist__first(evlist);
evsel = evlist__first(evlist);

evsel->core.attr.comm = 1;
evsel->core.attr.disabled = 1;

@@ -104,7 +105,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
goto out_err;
}

CHECK__(perf_evlist__mmap(evlist, UINT_MAX));
CHECK__(evlist__mmap(evlist, UINT_MAX));

/*
* First, test that a 'comm' event can be found when the event is

@@ -131,7 +132,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un

evlist__enable(evlist);

evsel = perf_evlist__last(evlist);
evsel = evlist__last(evlist);

CHECK__(evsel__disable(evsel));

@@ -11,6 +11,7 @@
#include "evsel.h"
#include "thread_map.h"
#include "tests.h"
#include "util/mmap.h"
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>

@@ -42,7 +43,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
expected_nr_events[nsyscalls], i, j;
struct evsel *evsels[nsyscalls], *evsel;
char sbuf[STRERR_BUFSIZE];
struct perf_mmap *md;
struct mmap *md;

threads = thread_map__new(-1, getpid(), UINT_MAX);
if (threads == NULL) {

@@ -99,7 +100,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
expected_nr_events[i] = 1 + rand() % 127;
}

if (perf_evlist__mmap(evlist, 128) < 0) {
if (evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;

@@ -16,7 +16,7 @@
#include "symbol.h"
#include "util/synthetic-events.h"
#include "thread.h"
#include "util.h" // page_size
#include <internal/lib.h> // page_size

#define THREADS 4

@@ -11,6 +11,7 @@
#include "record.h"
#include "tests.h"
#include "debug.h"
#include "util/mmap.h"
#include <errno.h>

#ifndef O_DIRECTORY

@@ -69,9 +70,9 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
goto out_delete_evlist;
}

err = perf_evlist__mmap(evlist, UINT_MAX);
err = evlist__mmap(evlist, UINT_MAX);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
pr_debug("evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}

@@ -86,9 +87,9 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
while (1) {
int before = nr_events;

for (i = 0; i < evlist->nr_mmaps; i++) {
for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
struct perf_mmap *md;
struct mmap *md;

md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)

@@ -126,7 +127,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
}

if (nr_events == before)
perf_evlist__poll(evlist, 10);
evlist__poll(evlist, 10);

if (++nr_polls > 5) {
pr_debug("%s: no events!\n", __func__);

@@ -46,7 +46,7 @@ static bool kvm_s390_create_vm_valid(void)

static int test__checkevent_tracepoint(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups);

@@ -77,7 +77,7 @@ static int test__checkevent_tracepoint_multi(struct evlist *evlist)

static int test__checkevent_raw(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);

@@ -87,7 +87,7 @@ static int test__checkevent_raw(struct evlist *evlist)

static int test__checkevent_numeric(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type);

@@ -97,7 +97,7 @@ static int test__checkevent_numeric(struct evlist *evlist)

static int test__checkevent_symbolic_name(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);

@@ -108,7 +108,7 @@ static int test__checkevent_symbolic_name(struct evlist *evlist)

static int test__checkevent_symbolic_name_config(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);

@@ -129,7 +129,7 @@ static int test__checkevent_symbolic_name_config(struct evlist *evlist)

static int test__checkevent_symbolic_alias(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);

@@ -140,7 +140,7 @@ static int test__checkevent_symbolic_alias(struct evlist *evlist)

static int test__checkevent_genhw(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);

@@ -150,7 +150,7 @@ static int test__checkevent_genhw(struct evlist *evlist)

static int test__checkevent_breakpoint(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);

@@ -164,7 +164,7 @@ static int test__checkevent_breakpoint(struct evlist *evlist)

static int test__checkevent_breakpoint_x(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);

@@ -177,7 +177,7 @@ static int test__checkevent_breakpoint_x(struct evlist *evlist)

static int test__checkevent_breakpoint_r(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type",

@@ -192,7 +192,7 @@ static int test__checkevent_breakpoint_r(struct evlist *evlist)

static int test__checkevent_breakpoint_w(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type",

@@ -207,7 +207,7 @@ static int test__checkevent_breakpoint_w(struct evlist *evlist)

static int test__checkevent_breakpoint_rw(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type",

@@ -222,7 +222,7 @@ static int test__checkevent_breakpoint_rw(struct evlist *evlist)

static int test__checkevent_tracepoint_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);

@@ -253,7 +253,7 @@ test__checkevent_tracepoint_multi_modifier(struct evlist *evlist)

static int test__checkevent_raw_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);

@@ -265,7 +265,7 @@ static int test__checkevent_raw_modifier(struct evlist *evlist)

static int test__checkevent_numeric_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);

@@ -277,7 +277,7 @@ static int test__checkevent_numeric_modifier(struct evlist *evlist)

static int test__checkevent_symbolic_name_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);

@@ -289,7 +289,7 @@ static int test__checkevent_symbolic_name_modifier(struct evlist *evlist)

static int test__checkevent_exclude_host_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);

@@ -299,7 +299,7 @@ static int test__checkevent_exclude_host_modifier(struct evlist *evlist)

static int test__checkevent_exclude_guest_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);

@@ -309,7 +309,7 @@ static int test__checkevent_exclude_guest_modifier(struct evlist *evlist)

static int test__checkevent_symbolic_alias_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);

@@ -321,7 +321,7 @@ static int test__checkevent_symbolic_alias_modifier(struct evlist *evlist)

static int test__checkevent_genhw_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);

@@ -333,7 +333,7 @@ static int test__checkevent_genhw_modifier(struct evlist *evlist)

static int test__checkevent_exclude_idle_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude idle", evsel->core.attr.exclude_idle);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);

@@ -348,7 +348,7 @@ static int test__checkevent_exclude_idle_modifier(struct evlist *evlist)

static int test__checkevent_exclude_idle_modifier_1(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude idle", evsel->core.attr.exclude_idle);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);

@@ -363,7 +363,7 @@ static int test__checkevent_exclude_idle_modifier_1(struct evlist *evlist)

static int test__checkevent_breakpoint_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);


TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);

@@ -378,7 +378,7 @@ static int test__checkevent_breakpoint_modifier(struct evlist *evlist)

static int test__checkevent_breakpoint_x_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);

@@ -392,7 +392,7 @@ static int test__checkevent_breakpoint_x_modifier(struct evlist *evlist)

static int test__checkevent_breakpoint_r_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);

@@ -406,7 +406,7 @@ static int test__checkevent_breakpoint_r_modifier(struct evlist *evlist)

static int test__checkevent_breakpoint_w_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);

@@ -420,7 +420,7 @@ static int test__checkevent_breakpoint_w_modifier(struct evlist *evlist)

static int test__checkevent_breakpoint_rw_modifier(struct evlist *evlist)
{
struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);

@@ -435,7 +435,7 @@ static int test__checkevent_breakpoint_rw_modifier(struct evlist *evlist)
static int test__checkevent_pmu(struct evlist *evlist)
{

struct evsel *evsel = perf_evlist__first(evlist);
struct evsel *evsel = evlist__first(evlist);

TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
|
||||
|
@ -453,7 +453,7 @@ static int test__checkevent_pmu(struct evlist *evlist)
|
|||
|
||||
static int test__checkevent_list(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries);
|
||||
|
||||
|
@ -492,7 +492,7 @@ static int test__checkevent_list(struct evlist *evlist)
|
|||
|
||||
static int test__checkevent_pmu_name(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
/* cpu/config=1,name=krava/u */
|
||||
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
|
||||
|
@ -513,7 +513,7 @@ static int test__checkevent_pmu_name(struct evlist *evlist)
|
|||
|
||||
static int test__checkevent_pmu_partial_time_callgraph(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
/* cpu/config=1,call-graph=fp,time,period=100000/ */
|
||||
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
|
||||
|
@ -546,7 +546,7 @@ static int test__checkevent_pmu_partial_time_callgraph(struct evlist *evlist)
|
|||
|
||||
static int test__checkevent_pmu_events(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
|
||||
|
@ -564,7 +564,7 @@ static int test__checkevent_pmu_events(struct evlist *evlist)
|
|||
|
||||
static int test__checkevent_pmu_events_mix(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
/* pmu-event:u */
|
||||
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
|
||||
|
@ -642,7 +642,7 @@ static int test__group1(struct evlist *evlist)
|
|||
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
|
||||
|
||||
/* instructions:k */
|
||||
evsel = leader = perf_evlist__first(evlist);
|
||||
evsel = leader = evlist__first(evlist);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
|
||||
TEST_ASSERT_VAL("wrong config",
|
||||
PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config);
|
||||
|
@ -684,7 +684,7 @@ static int test__group2(struct evlist *evlist)
|
|||
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
|
||||
|
||||
/* faults + :ku modifier */
|
||||
evsel = leader = perf_evlist__first(evlist);
|
||||
evsel = leader = evlist__first(evlist);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
|
||||
TEST_ASSERT_VAL("wrong config",
|
||||
PERF_COUNT_SW_PAGE_FAULTS == evsel->core.attr.config);
|
||||
|
@ -739,7 +739,7 @@ static int test__group3(struct evlist *evlist __maybe_unused)
|
|||
TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups);
|
||||
|
||||
/* group1 syscalls:sys_enter_openat:H */
|
||||
evsel = leader = perf_evlist__first(evlist);
|
||||
evsel = leader = evlist__first(evlist);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type);
|
||||
TEST_ASSERT_VAL("wrong sample_type",
|
||||
PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
|
||||
|
@ -831,7 +831,7 @@ static int test__group4(struct evlist *evlist __maybe_unused)
|
|||
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
|
||||
|
||||
/* cycles:u + p */
|
||||
evsel = leader = perf_evlist__first(evlist);
|
||||
evsel = leader = evlist__first(evlist);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
|
||||
TEST_ASSERT_VAL("wrong config",
|
||||
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
|
||||
|
@ -875,7 +875,7 @@ static int test__group5(struct evlist *evlist __maybe_unused)
|
|||
TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups);
|
||||
|
||||
/* cycles + G */
|
||||
evsel = leader = perf_evlist__first(evlist);
|
||||
evsel = leader = evlist__first(evlist);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
|
||||
TEST_ASSERT_VAL("wrong config",
|
||||
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
|
||||
|
@ -961,7 +961,7 @@ static int test__group_gh1(struct evlist *evlist)
|
|||
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
|
||||
|
||||
/* cycles + :H group modifier */
|
||||
evsel = leader = perf_evlist__first(evlist);
|
||||
evsel = leader = evlist__first(evlist);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
|
||||
TEST_ASSERT_VAL("wrong config",
|
||||
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
|
||||
|
@ -1001,7 +1001,7 @@ static int test__group_gh2(struct evlist *evlist)
|
|||
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
|
||||
|
||||
/* cycles + :G group modifier */
|
||||
evsel = leader = perf_evlist__first(evlist);
|
||||
evsel = leader = evlist__first(evlist);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
|
||||
TEST_ASSERT_VAL("wrong config",
|
||||
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
|
||||
|
@ -1041,7 +1041,7 @@ static int test__group_gh3(struct evlist *evlist)
|
|||
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
|
||||
|
||||
/* cycles:G + :u group modifier */
|
||||
evsel = leader = perf_evlist__first(evlist);
|
||||
evsel = leader = evlist__first(evlist);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
|
||||
TEST_ASSERT_VAL("wrong config",
|
||||
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
|
||||
|
@ -1081,7 +1081,7 @@ static int test__group_gh4(struct evlist *evlist)
|
|||
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
|
||||
|
||||
/* cycles:G + :uG group modifier */
|
||||
evsel = leader = perf_evlist__first(evlist);
|
||||
evsel = leader = evlist__first(evlist);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
|
||||
TEST_ASSERT_VAL("wrong config",
|
||||
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
|
||||
|
@ -1120,7 +1120,7 @@ static int test__leader_sample1(struct evlist *evlist)
|
|||
TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries);
|
||||
|
||||
/* cycles - sampling group leader */
|
||||
evsel = leader = perf_evlist__first(evlist);
|
||||
evsel = leader = evlist__first(evlist);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
|
||||
TEST_ASSERT_VAL("wrong config",
|
||||
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
|
||||
|
@ -1173,7 +1173,7 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused)
|
|||
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
|
||||
|
||||
/* instructions - sampling group leader */
|
||||
evsel = leader = perf_evlist__first(evlist);
|
||||
evsel = leader = evlist__first(evlist);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
|
||||
TEST_ASSERT_VAL("wrong config",
|
||||
PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config);
|
||||
|
@ -1207,7 +1207,7 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused)
|
|||
|
||||
static int test__checkevent_pinned_modifier(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
|
||||
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
|
||||
|
@ -1225,7 +1225,7 @@ static int test__pinned_group(struct evlist *evlist)
|
|||
TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries);
|
||||
|
||||
/* cycles - group leader */
|
||||
evsel = leader = perf_evlist__first(evlist);
|
||||
evsel = leader = evlist__first(evlist);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
|
||||
TEST_ASSERT_VAL("wrong config",
|
||||
PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
|
||||
|
@ -1251,7 +1251,7 @@ static int test__pinned_group(struct evlist *evlist)
|
|||
|
||||
static int test__checkevent_breakpoint_len(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
|
||||
|
@ -1266,7 +1266,7 @@ static int test__checkevent_breakpoint_len(struct evlist *evlist)
|
|||
|
||||
static int test__checkevent_breakpoint_len_w(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
|
||||
|
@ -1282,7 +1282,7 @@ static int test__checkevent_breakpoint_len_w(struct evlist *evlist)
|
|||
static int
|
||||
test__checkevent_breakpoint_len_rw_modifier(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
|
||||
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
|
||||
|
@ -1294,7 +1294,7 @@ test__checkevent_breakpoint_len_rw_modifier(struct evlist *evlist)
|
|||
|
||||
static int test__checkevent_precise_max_modifier(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
|
||||
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
|
||||
|
@ -1305,7 +1305,7 @@ static int test__checkevent_precise_max_modifier(struct evlist *evlist)
|
|||
|
||||
static int test__checkevent_config_symbol(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "insn") == 0);
|
||||
return 0;
|
||||
|
@ -1313,7 +1313,7 @@ static int test__checkevent_config_symbol(struct evlist *evlist)
|
|||
|
||||
static int test__checkevent_config_raw(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "rawpmu") == 0);
|
||||
return 0;
|
||||
|
@ -1321,7 +1321,7 @@ static int test__checkevent_config_raw(struct evlist *evlist)
|
|||
|
||||
static int test__checkevent_config_num(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "numpmu") == 0);
|
||||
return 0;
|
||||
|
@ -1329,7 +1329,7 @@ static int test__checkevent_config_num(struct evlist *evlist)
|
|||
|
||||
static int test__checkevent_config_cache(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "cachepmu") == 0);
|
||||
return 0;
|
||||
|
@ -1342,7 +1342,7 @@ static bool test__intel_pt_valid(void)
|
|||
|
||||
static int test__intel_pt(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "intel_pt//u") == 0);
|
||||
return 0;
|
||||
|
@ -1350,7 +1350,7 @@ static int test__intel_pt(struct evlist *evlist)
|
|||
|
||||
static int test__checkevent_complex_name(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong complex name parsing", strcmp(evsel->name, "COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks") == 0);
|
||||
return 0;
|
||||
|
@ -1358,7 +1358,7 @@ static int test__checkevent_complex_name(struct evlist *evlist)
|
|||
|
||||
static int test__sym_event_slash(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
|
||||
TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES);
|
||||
|
@ -1368,7 +1368,7 @@ static int test__sym_event_slash(struct evlist *evlist)
|
|||
|
||||
static int test__sym_event_dc(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *evsel = perf_evlist__first(evlist);
|
||||
struct evsel *evsel = evlist__first(evlist);
|
||||
|
||||
TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
|
||||
TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES);
|
||||
|
@@ -11,6 +11,7 @@
 #include "debug.h"
 #include "record.h"
 #include "tests.h"
+#include "util/mmap.h"
 
 static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
 {
@@ -103,7 +104,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
 	/*
 	 * Config the evsels, setting attr->comm on the first one, etc.
 	 */
-	evsel = perf_evlist__first(evlist);
+	evsel = evlist__first(evlist);
 	perf_evsel__set_sample_bit(evsel, CPU);
 	perf_evsel__set_sample_bit(evsel, TID);
 	perf_evsel__set_sample_bit(evsel, TIME);
@@ -143,9 +144,9 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
 	 * fds in the same CPU to be injected in the same mmap ring buffer
 	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
 	 */
-	err = perf_evlist__mmap(evlist, opts.mmap_pages);
+	err = evlist__mmap(evlist, opts.mmap_pages);
 	if (err < 0) {
-		pr_debug("perf_evlist__mmap: %s\n",
+		pr_debug("evlist__mmap: %s\n",
 			 str_error_r(errno, sbuf, sizeof(sbuf)));
 		goto out_delete_evlist;
 	}
@@ -164,9 +165,9 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
 	while (1) {
 		int before = total_events;
 
-		for (i = 0; i < evlist->nr_mmaps; i++) {
+		for (i = 0; i < evlist->core.nr_mmaps; i++) {
 			union perf_event *event;
-			struct perf_mmap *md;
+			struct mmap *md;
 
 			md = &evlist->mmap[i];
 			if (perf_mmap__read_init(md) < 0)
@@ -286,7 +287,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
 		 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
 		 */
 		if (total_events == before && false)
-			perf_evlist__poll(evlist, -1);
+			evlist__poll(evlist, -1);
 
 		sleep(1);
 		if (++wakeups > 5) {
 
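The hunks above are dominated by two mechanical renames: the evlist helpers lose their perf_evlist__ prefix (evlist__first(), evlist__mmap(), evlist__poll()) and the tool-side ring buffer becomes struct mmap with its counters under the ->core member. As orientation only, and not part of the patch, here is a minimal sketch of the post-rename consumption loop the way the tests above use it; it assumes it is compiled inside tools/perf so the internal headers are available.

```c
/* Sketch: drain all ring buffers of an already evlist__mmap()'ed evlist.
 * Assumes the in-tree tools/perf headers; illustrative, not upstream code. */
#include "util/evlist.h"
#include "util/mmap.h"

static int drain_ring_buffers(struct evlist *evlist)
{
	union perf_event *event;
	int i, total = 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {	/* was evlist->nr_mmaps */
		struct mmap *md = &evlist->mmap[i];	/* was struct perf_mmap */

		if (perf_mmap__read_init(md) < 0)
			continue;
		while ((event = perf_mmap__read_event(md)) != NULL) {
			total++;		/* a real tool would process the event here */
			perf_mmap__consume(md);
		}
		perf_mmap__read_done(md);
	}
	return total;
}
```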
@@ -3,6 +3,7 @@
 #include <limits.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <unistd.h>
 #include <sys/epoll.h>
 #include <util/symbol.h>
 #include <linux/filter.h>
 
@@ -12,6 +12,7 @@
 #include "util/evsel.h"
 #include "util/evlist.h"
 #include "util/cpumap.h"
+#include "util/mmap.h"
 #include "util/thread_map.h"
 #include <perf/evlist.h>
 
@@ -42,7 +43,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 	};
 	struct perf_cpu_map *cpus;
 	struct perf_thread_map *threads;
-	struct perf_mmap *md;
+	struct mmap *md;
 
 	attr.sample_freq = 500;
 
@@ -82,7 +83,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 		goto out_delete_evlist;
 	}
 
-	err = perf_evlist__mmap(evlist, 128);
+	err = evlist__mmap(evlist, 128);
 	if (err < 0) {
 		pr_debug("failed to mmap event: %d (%s)\n", errno,
 			 str_error_r(errno, sbuf, sizeof(sbuf)));
 
@@ -16,6 +16,7 @@
 #include "thread_map.h"
 #include "record.h"
 #include "tests.h"
+#include "util/mmap.h"
 
 static int spin_sleep(void)
 {
@@ -143,7 +144,7 @@ static int process_sample_event(struct evlist *evlist,
 		return err;
 	/*
 	 * Check for no missing sched_switch events i.e. that the
-	 * evsel->system_wide flag has worked.
+	 * evsel->core.system_wide flag has worked.
 	 */
 	if (switch_tracking->tids[cpu] != -1 &&
 	    switch_tracking->tids[cpu] != prev_tid) {
@@ -263,10 +264,10 @@ static int process_events(struct evlist *evlist,
 	unsigned pos, cnt = 0;
 	LIST_HEAD(events);
 	struct event_node *events_array, *node;
-	struct perf_mmap *md;
+	struct mmap *md;
 	int i, ret;
 
-	for (i = 0; i < evlist->nr_mmaps; i++) {
+	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 		md = &evlist->mmap[i];
 		if (perf_mmap__read_init(md) < 0)
 			continue;
@@ -315,7 +316,7 @@ out_free_nodes:
  *
  * This function implements a test that checks that sched_switch events and
  * tracking events can be recorded for a workload (current process) using the
- * evsel->system_wide and evsel->tracking flags (respectively) with other events
+ * evsel->core.system_wide and evsel->tracking flags (respectively) with other events
  * sometimes enabled or disabled.
  */
 int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_unused)
@@ -366,7 +367,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
 		goto out_err;
 	}
 
-	cpu_clocks_evsel = perf_evlist__last(evlist);
+	cpu_clocks_evsel = evlist__last(evlist);
 
 	/* Second event */
 	err = parse_events(evlist, "cycles:u", NULL);
@@ -375,7 +376,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
 		goto out_err;
 	}
 
-	cycles_evsel = perf_evlist__last(evlist);
+	cycles_evsel = evlist__last(evlist);
 
 	/* Third event */
 	if (!perf_evlist__can_select_event(evlist, sched_switch)) {
@@ -390,22 +391,22 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
 		goto out_err;
 	}
 
-	switch_evsel = perf_evlist__last(evlist);
+	switch_evsel = evlist__last(evlist);
 
 	perf_evsel__set_sample_bit(switch_evsel, CPU);
 	perf_evsel__set_sample_bit(switch_evsel, TIME);
 
-	switch_evsel->system_wide = true;
+	switch_evsel->core.system_wide = true;
 	switch_evsel->no_aux_samples = true;
 	switch_evsel->immediate = true;
 
 	/* Test moving an event to the front */
-	if (cycles_evsel == perf_evlist__first(evlist)) {
+	if (cycles_evsel == evlist__first(evlist)) {
 		pr_debug("cycles event already at front");
 		goto out_err;
 	}
 	perf_evlist__to_front(evlist, cycles_evsel);
-	if (cycles_evsel != perf_evlist__first(evlist)) {
+	if (cycles_evsel != evlist__first(evlist)) {
 		pr_debug("Failed to move cycles event to front");
 		goto out_err;
 	}
@@ -420,7 +421,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
 		goto out_err;
 	}
 
-	tracking_evsel = perf_evlist__last(evlist);
+	tracking_evsel = evlist__last(evlist);
 
 	perf_evlist__set_tracking_event(evlist, tracking_evsel);
 
@@ -433,7 +434,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
 	perf_evlist__config(evlist, &opts, NULL);
 
 	/* Check moved event is still at the front */
-	if (cycles_evsel != perf_evlist__first(evlist)) {
+	if (cycles_evsel != evlist__first(evlist)) {
 		pr_debug("Front event no longer at front");
 		goto out_err;
 	}
@@ -460,9 +461,9 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
 		goto out;
 	}
 
-	err = perf_evlist__mmap(evlist, UINT_MAX);
+	err = evlist__mmap(evlist, UINT_MAX);
 	if (err) {
-		pr_debug("perf_evlist__mmap failed!\n");
+		pr_debug("evlist__mmap failed!\n");
 		goto out_err;
 	}
 
@@ -5,6 +5,7 @@
 #include "target.h"
 #include "thread_map.h"
 #include "tests.h"
+#include "util/mmap.h"
 
 #include <errno.h>
 #include <signal.h>
@@ -51,7 +52,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
 	char sbuf[STRERR_BUFSIZE];
 	struct perf_cpu_map *cpus;
 	struct perf_thread_map *threads;
-	struct perf_mmap *md;
+	struct mmap *md;
 
 	signal(SIGCHLD, sig_handler);
 
@@ -87,7 +88,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
 		goto out_delete_evlist;
 	}
 
-	evsel = perf_evlist__first(evlist);
+	evsel = evlist__first(evlist);
 	evsel->core.attr.task = 1;
 #ifdef __s390x__
 	evsel->core.attr.sample_freq = 1000000;
@@ -106,7 +107,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
 		goto out_delete_evlist;
 	}
 
-	if (perf_evlist__mmap(evlist, 128) < 0) {
+	if (evlist__mmap(evlist, 128) < 0) {
 		pr_debug("failed to mmap events: %d (%s)\n", errno,
 			 str_error_r(errno, sbuf, sizeof(sbuf)));
 		goto out_delete_evlist;
@@ -129,7 +130,7 @@ retry:
 
 out_init:
 	if (!exited || !nr_exit) {
-		perf_evlist__poll(evlist, -1);
+		evlist__poll(evlist, -1);
 		goto retry;
 	}
 
@@ -7,7 +7,7 @@
 #include "dso.h"
 #include "map.h"
 #include "symbol.h"
-#include "util.h" // page_size
+#include <internal/lib.h> // page_size
 #include "tests.h"
 #include "debug.h"
 #include "machine.h"
 
@@ -3319,13 +3319,13 @@ browse_hists:
 			switch (key) {
 			case K_TAB:
 				if (pos->core.node.next == &evlist->core.entries)
-					pos = perf_evlist__first(evlist);
+					pos = evlist__first(evlist);
 				else
 					pos = perf_evsel__next(pos);
 				goto browse_hists;
 			case K_UNTAB:
 				if (pos->core.node.prev == &evlist->core.entries)
-					pos = perf_evlist__last(evlist);
+					pos = evlist__last(evlist);
 				else
 					pos = perf_evsel__prev(pos);
 				goto browse_hists;
@@ -3417,7 +3417,7 @@ int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help,
 
 single_entry:
 	if (nr_entries == 1) {
-		struct evsel *first = perf_evlist__first(evlist);
+		struct evsel *first = evlist__first(evlist);
 
 		return perf_evsel__hists_browse(first, nr_entries, help,
 						false, hbt, min_pcnt,
 
@@ -8,6 +8,7 @@
 #include "../string2.h"
 #include "gtk.h"
 #include <signal.h>
+#include <stdlib.h>
 #include <linux/string.h>
 
 #define MAX_COLUMNS 32
 
@@ -3,6 +3,7 @@ perf-y += block-range.o
 perf-y += build-id.o
 perf-y += cacheline.o
 perf-y += config.o
+perf-y += copyfile.o
 perf-y += ctype.o
 perf-y += db-export.o
 perf-y += env.o
@@ -10,6 +11,7 @@ perf-y += event.o
 perf-y += evlist.o
 perf-y += evsel.o
 perf-y += evsel_fprintf.o
+perf-y += perf_event_attr_fprintf.o
 perf-y += evswitch.o
 perf-y += find_bit.o
 perf-y += get_current_dir_name.o
 
@@ -34,6 +34,7 @@
 #include "bpf-event.h"
 #include "block-range.h"
 #include "string2.h"
+#include "util/event.h"
 #include "arch/common.h"
 #include <regex.h>
 #include <pthread.h>
 
@@ -50,7 +50,7 @@
 #include "intel-bts.h"
 #include "arm-spe.h"
 #include "s390-cpumsf.h"
-#include "util.h" // page_size
+#include "util/mmap.h"
 
 #include <linux/ctype.h>
 #include <linux/kernel.h>
@@ -1228,7 +1228,7 @@ int perf_event__process_auxtrace_error(struct perf_session *session,
 	return 0;
 }
 
-static int __auxtrace_mmap__read(struct perf_mmap *map,
+static int __auxtrace_mmap__read(struct mmap *map,
 				 struct auxtrace_record *itr,
 				 struct perf_tool *tool, process_auxtrace_t fn,
 				 bool snapshot, size_t snapshot_size)
@@ -1339,13 +1339,13 @@ static int __auxtrace_mmap__read(struct perf_mmap *map,
 	return 1;
 }
 
-int auxtrace_mmap__read(struct perf_mmap *map, struct auxtrace_record *itr,
+int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
 			struct perf_tool *tool, process_auxtrace_t fn)
 {
 	return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
 }
 
-int auxtrace_mmap__read_snapshot(struct perf_mmap *map,
+int auxtrace_mmap__read_snapshot(struct mmap *map,
 				 struct auxtrace_record *itr,
 				 struct perf_tool *tool, process_auxtrace_t fn,
 				 size_t snapshot_size)
 
@@ -22,7 +22,7 @@ union perf_event;
 struct perf_session;
 struct evlist;
 struct perf_tool;
-struct perf_mmap;
+struct mmap;
 struct perf_sample;
 struct option;
 struct record_opts;
@@ -445,14 +445,14 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
 				   bool per_cpu);
 
 typedef int (*process_auxtrace_t)(struct perf_tool *tool,
-				  struct perf_mmap *map,
+				  struct mmap *map,
 				  union perf_event *event, void *data1,
 				  size_t len1, void *data2, size_t len2);
 
-int auxtrace_mmap__read(struct perf_mmap *map, struct auxtrace_record *itr,
+int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
 			struct perf_tool *tool, process_auxtrace_t fn);
 
-int auxtrace_mmap__read_snapshot(struct perf_mmap *map,
+int auxtrace_mmap__read_snapshot(struct mmap *map,
 				 struct auxtrace_record *itr,
 				 struct perf_tool *tool, process_auxtrace_t fn,
 				 size_t snapshot_size);
 
@@ -1568,7 +1568,7 @@ struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
 			return ERR_PTR(-err);
 		}
 
-		evsel = perf_evlist__last(evlist);
+		evsel = evlist__last(evlist);
 	}
 
 	bpf__for_each_map_named(map, obj, tmp, name) {
 
@@ -7,12 +7,13 @@
  * Copyright (C) 2009, 2010 Red Hat Inc.
  * Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
  */
-#include "util.h" // copyfile_ns(), lsdir(), mkdir_p(), rm_rf()
+#include "util.h" // lsdir(), mkdir_p(), rm_rf()
 #include <dirent.h>
 #include <errno.h>
 #include <stdio.h>
 #include <sys/stat.h>
 #include <sys/types.h>
+#include "util/copyfile.h"
 #include "dso.h"
 #include "build-id.h"
 #include "event.h"
 
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/copyfile.h"
+#include "util/namespaces.h"
+#include <internal/lib.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+static int slow_copyfile(const char *from, const char *to, struct nsinfo *nsi)
+{
+	int err = -1;
+	char *line = NULL;
+	size_t n;
+	FILE *from_fp, *to_fp;
+	struct nscookie nsc;
+
+	nsinfo__mountns_enter(nsi, &nsc);
+	from_fp = fopen(from, "r");
+	nsinfo__mountns_exit(&nsc);
+	if (from_fp == NULL)
+		goto out;
+
+	to_fp = fopen(to, "w");
+	if (to_fp == NULL)
+		goto out_fclose_from;
+
+	while (getline(&line, &n, from_fp) > 0)
+		if (fputs(line, to_fp) == EOF)
+			goto out_fclose_to;
+	err = 0;
+out_fclose_to:
+	fclose(to_fp);
+	free(line);
+out_fclose_from:
+	fclose(from_fp);
+out:
+	return err;
+}
+
+int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
+{
+	void *ptr;
+	loff_t pgoff;
+
+	pgoff = off_in & ~(page_size - 1);
+	off_in -= pgoff;
+
+	ptr = mmap(NULL, off_in + size, PROT_READ, MAP_PRIVATE, ifd, pgoff);
+	if (ptr == MAP_FAILED)
+		return -1;
+
+	while (size) {
+		ssize_t ret = pwrite(ofd, ptr + off_in, size, off_out);
+		if (ret < 0 && errno == EINTR)
+			continue;
+		if (ret <= 0)
+			break;
+
+		size -= ret;
+		off_in += ret;
+		off_out += ret;
+	}
+	munmap(ptr, off_in + size);
+
+	return size ? -1 : 0;
+}
+
+static int copyfile_mode_ns(const char *from, const char *to, mode_t mode,
+			    struct nsinfo *nsi)
+{
+	int fromfd, tofd;
+	struct stat st;
+	int err;
+	char *tmp = NULL, *ptr = NULL;
+	struct nscookie nsc;
+
+	nsinfo__mountns_enter(nsi, &nsc);
+	err = stat(from, &st);
+	nsinfo__mountns_exit(&nsc);
+	if (err)
+		goto out;
+	err = -1;
+
+	/* extra 'x' at the end is to reserve space for '.' */
+	if (asprintf(&tmp, "%s.XXXXXXx", to) < 0) {
+		tmp = NULL;
+		goto out;
+	}
+	ptr = strrchr(tmp, '/');
+	if (!ptr)
+		goto out;
+	ptr = memmove(ptr + 1, ptr, strlen(ptr) - 1);
+	*ptr = '.';
+
+	tofd = mkstemp(tmp);
+	if (tofd < 0)
+		goto out;
+
+	if (fchmod(tofd, mode))
+		goto out_close_to;
+
+	if (st.st_size == 0) { /* /proc? do it slowly... */
+		err = slow_copyfile(from, tmp, nsi);
+		goto out_close_to;
+	}
+
+	nsinfo__mountns_enter(nsi, &nsc);
+	fromfd = open(from, O_RDONLY);
+	nsinfo__mountns_exit(&nsc);
+	if (fromfd < 0)
+		goto out_close_to;
+
+	err = copyfile_offset(fromfd, 0, tofd, 0, st.st_size);
+
+	close(fromfd);
+out_close_to:
+	close(tofd);
+	if (!err)
+		err = link(tmp, to);
+	unlink(tmp);
+out:
+	free(tmp);
+	return err;
+}
+
+int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi)
+{
+	return copyfile_mode_ns(from, to, 0755, nsi);
+}
+
+int copyfile_mode(const char *from, const char *to, mode_t mode)
+{
+	return copyfile_mode_ns(from, to, mode, NULL);
+}
+
+int copyfile(const char *from, const char *to)
+{
+	return copyfile_mode(from, to, 0755);
+}
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef PERF_COPYFILE_H_
+#define PERF_COPYFILE_H_
+
+#include <linux/types.h>
+#include <sys/types.h>
+#include <fcntl.h>
+
+struct nsinfo;
+
+int copyfile(const char *from, const char *to);
+int copyfile_mode(const char *from, const char *to, mode_t mode);
+int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi);
+int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size);
+
+#endif // PERF_COPYFILE_H_
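The two new files above carve the copyfile helpers out of util.c into their own object. As a quick orientation, and not part of the patch, a hypothetical caller only needs util/copyfile.h; a minimal sketch, assuming it is built inside tools/perf so that header is on the include path:

```c
/* Sketch only: copy one file with the new helper.
 * Assumes the in-tree tools/perf build provides util/copyfile.h. */
#include "util/copyfile.h"
#include <stdio.h>

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <from> <to>\n", argv[0]);
		return 1;
	}
	/* copyfile() writes into a mkstemp() temporary next to <to>, fchmod()s it
	 * to 0755, then link()s it into place, so the destination appears whole. */
	return copyfile(argv[1], argv[2]) ? 1 : 0;
}
```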
@@ -1298,7 +1298,7 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
 	attr.read_format = evsel->core.attr.read_format;
 
 	/* create new id val to be a fixed offset from evsel id */
-	id = evsel->id[0] + 1000000000;
+	id = evsel->core.id[0] + 1000000000;
 
 	if (!id)
 		id = 1;
 
@ -10,13 +10,14 @@
|
|||
#include <inttypes.h>
|
||||
#include <poll.h>
|
||||
#include "cpumap.h"
|
||||
#include "util/mmap.h"
|
||||
#include "thread_map.h"
|
||||
#include "target.h"
|
||||
#include "evlist.h"
|
||||
#include "evsel.h"
|
||||
#include "debug.h"
|
||||
#include "units.h"
|
||||
#include "util.h" // page_size
|
||||
#include <internal/lib.h> // page_size
|
||||
#include "../perf.h"
|
||||
#include "asm/bug.h"
|
||||
#include "bpf-event.h"
|
||||
|
@ -49,18 +50,14 @@ int sigqueue(pid_t pid, int sig, const union sigval value);
|
|||
#endif
|
||||
|
||||
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
|
||||
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
|
||||
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
|
||||
|
||||
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
|
||||
struct perf_thread_map *threads)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
|
||||
INIT_HLIST_HEAD(&evlist->heads[i]);
|
||||
perf_evlist__init(&evlist->core);
|
||||
perf_evlist__set_maps(&evlist->core, cpus, threads);
|
||||
fdarray__init(&evlist->pollfd, 64);
|
||||
fdarray__init(&evlist->core.pollfd, 64);
|
||||
evlist->workload.pid = -1;
|
||||
evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
|
||||
}
|
||||
|
@ -108,7 +105,7 @@ struct evlist *perf_evlist__new_dummy(void)
|
|||
*/
|
||||
void perf_evlist__set_id_pos(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *first = perf_evlist__first(evlist);
|
||||
struct evsel *first = evlist__first(evlist);
|
||||
|
||||
evlist->id_pos = first->id_pos;
|
||||
evlist->is_pos = first->is_pos;
|
||||
|
@ -124,7 +121,7 @@ static void perf_evlist__update_id_pos(struct evlist *evlist)
|
|||
perf_evlist__set_id_pos(evlist);
|
||||
}
|
||||
|
||||
static void perf_evlist__purge(struct evlist *evlist)
|
||||
static void evlist__purge(struct evlist *evlist)
|
||||
{
|
||||
struct evsel *pos, *n;
|
||||
|
||||
|
@ -137,11 +134,11 @@ static void perf_evlist__purge(struct evlist *evlist)
|
|||
evlist->core.nr_entries = 0;
|
||||
}
|
||||
|
||||
void perf_evlist__exit(struct evlist *evlist)
|
||||
void evlist__exit(struct evlist *evlist)
|
||||
{
|
||||
zfree(&evlist->mmap);
|
||||
zfree(&evlist->overwrite_mmap);
|
||||
fdarray__exit(&evlist->pollfd);
|
||||
fdarray__exit(&evlist->core.pollfd);
|
||||
}
|
||||
|
||||
void evlist__delete(struct evlist *evlist)
|
||||
|
@ -149,14 +146,14 @@ void evlist__delete(struct evlist *evlist)
|
|||
if (evlist == NULL)
|
||||
return;
|
||||
|
||||
perf_evlist__munmap(evlist);
|
||||
evlist__munmap(evlist);
|
||||
evlist__close(evlist);
|
||||
perf_cpu_map__put(evlist->core.cpus);
|
||||
perf_thread_map__put(evlist->core.threads);
|
||||
evlist->core.cpus = NULL;
|
||||
evlist->core.threads = NULL;
|
||||
perf_evlist__purge(evlist);
|
||||
perf_evlist__exit(evlist);
|
||||
evlist__purge(evlist);
|
||||
evlist__exit(evlist);
|
||||
free(evlist);
|
||||
}
|
||||
|
||||
|
@ -318,7 +315,7 @@ int perf_evlist__add_newtp(struct evlist *evlist,
|
|||
static int perf_evlist__nr_threads(struct evlist *evlist,
|
||||
struct evsel *evsel)
|
||||
{
|
||||
if (evsel->system_wide)
|
||||
if (evsel->core.system_wide)
|
||||
return 1;
|
||||
else
|
||||
return perf_thread_map__nr(evlist->core.threads);
|
||||
|
@ -401,128 +398,29 @@ int perf_evlist__enable_event_idx(struct evlist *evlist,
|
|||
return perf_evlist__enable_event_thread(evlist, evsel, idx);
|
||||
}
|
||||
|
||||
int perf_evlist__alloc_pollfd(struct evlist *evlist)
|
||||
int evlist__add_pollfd(struct evlist *evlist, int fd)
|
||||
{
|
||||
int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
|
||||
int nr_threads = perf_thread_map__nr(evlist->core.threads);
|
||||
int nfds = 0;
|
||||
struct evsel *evsel;
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if (evsel->system_wide)
|
||||
nfds += nr_cpus;
|
||||
else
|
||||
nfds += nr_cpus * nr_threads;
|
||||
}
|
||||
|
||||
if (fdarray__available_entries(&evlist->pollfd) < nfds &&
|
||||
fdarray__grow(&evlist->pollfd, nfds) < 0)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __perf_evlist__add_pollfd(struct evlist *evlist, int fd,
|
||||
struct perf_mmap *map, short revent)
|
||||
{
|
||||
int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
|
||||
/*
|
||||
* Save the idx so that when we filter out fds POLLHUP'ed we can
|
||||
* close the associated evlist->mmap[] entry.
|
||||
*/
|
||||
if (pos >= 0) {
|
||||
evlist->pollfd.priv[pos].ptr = map;
|
||||
|
||||
fcntl(fd, F_SETFL, O_NONBLOCK);
|
||||
}
|
||||
|
||||
return pos;
|
||||
}
|
||||
|
||||
int perf_evlist__add_pollfd(struct evlist *evlist, int fd)
|
||||
{
|
||||
return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
|
||||
return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN);
|
||||
}
|
||||
|
||||
static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
|
||||
void *arg __maybe_unused)
|
||||
{
|
||||
struct perf_mmap *map = fda->priv[fd].ptr;
|
||||
struct mmap *map = fda->priv[fd].ptr;
|
||||
|
||||
if (map)
|
||||
perf_mmap__put(map);
|
||||
}
|
||||
|
||||
int perf_evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
|
||||
int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
|
||||
{
|
||||
return fdarray__filter(&evlist->pollfd, revents_and_mask,
|
||||
return fdarray__filter(&evlist->core.pollfd, revents_and_mask,
|
||||
perf_evlist__munmap_filtered, NULL);
|
||||
}
|
||||
|
||||
int perf_evlist__poll(struct evlist *evlist, int timeout)
|
||||
int evlist__poll(struct evlist *evlist, int timeout)
|
||||
{
|
||||
return fdarray__poll(&evlist->pollfd, timeout);
|
||||
}
|
||||
|
||||
static void perf_evlist__id_hash(struct evlist *evlist,
|
||||
struct evsel *evsel,
|
||||
int cpu, int thread, u64 id)
|
||||
{
|
||||
int hash;
|
||||
struct perf_sample_id *sid = SID(evsel, cpu, thread);
|
||||
|
||||
sid->id = id;
|
||||
sid->evsel = evsel;
|
||||
hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
|
||||
hlist_add_head(&sid->node, &evlist->heads[hash]);
|
||||
}
|
||||
|
||||
void perf_evlist__id_add(struct evlist *evlist, struct evsel *evsel,
|
||||
int cpu, int thread, u64 id)
|
||||
{
|
||||
perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
|
||||
evsel->id[evsel->ids++] = id;
|
||||
}
|
||||
|
||||
int perf_evlist__id_add_fd(struct evlist *evlist,
|
||||
struct evsel *evsel,
|
||||
int cpu, int thread, int fd)
|
||||
{
|
||||
u64 read_data[4] = { 0, };
|
||||
int id_idx = 1; /* The first entry is the counter value */
|
||||
u64 id;
|
||||
int ret;
|
||||
|
||||
ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
|
||||
if (!ret)
|
||||
goto add;
|
||||
|
||||
if (errno != ENOTTY)
|
||||
return -1;
|
||||
|
||||
/* Legacy way to get event id.. All hail to old kernels! */
|
||||
|
||||
/*
|
||||
* This way does not work with group format read, so bail
|
||||
* out in that case.
|
||||
*/
|
||||
if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
|
||||
return -1;
|
||||
|
||||
if (!(evsel->core.attr.read_format & PERF_FORMAT_ID) ||
|
||||
read(fd, &read_data, sizeof(read_data)) == -1)
|
||||
return -1;
|
||||
|
||||
if (evsel->core.attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
|
||||
++id_idx;
|
||||
if (evsel->core.attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
|
||||
++id_idx;
|
||||
|
||||
id = read_data[id_idx];
|
||||
|
||||
add:
|
||||
perf_evlist__id_add(evlist, evsel, cpu, thread, id);
|
||||
return 0;
|
||||
return perf_evlist__poll(&evlist->core, timeout);
|
||||
}
|
||||
|
||||
static void perf_evlist__set_sid_idx(struct evlist *evlist,
|
||||
|
@ -535,7 +433,7 @@ static void perf_evlist__set_sid_idx(struct evlist *evlist,
|
|||
sid->cpu = evlist->core.cpus->map[cpu];
|
||||
else
|
||||
sid->cpu = -1;
|
||||
if (!evsel->system_wide && evlist->core.threads && thread >= 0)
|
||||
if (!evsel->core.system_wide && evlist->core.threads && thread >= 0)
|
||||
sid->tid = perf_thread_map__pid(evlist->core.threads, thread);
|
||||
else
|
||||
sid->tid = -1;
|
||||
|
@ -548,7 +446,7 @@ struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
|
|||
int hash;
|
||||
|
||||
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
|
||||
head = &evlist->heads[hash];
|
||||
head = &evlist->core.heads[hash];
|
||||
|
||||
hlist_for_each_entry(sid, head, node)
|
||||
if (sid->id == id)
|
||||
|
@ -562,14 +460,14 @@ struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
|
|||
struct perf_sample_id *sid;
|
||||
|
||||
if (evlist->core.nr_entries == 1 || !id)
|
||||
return perf_evlist__first(evlist);
|
||||
return evlist__first(evlist);
|
||||
|
||||
sid = perf_evlist__id2sid(evlist, id);
|
||||
if (sid)
|
||||
return sid->evsel;
|
||||
return container_of(sid->evsel, struct evsel, core);
|
||||
|
||||
if (!perf_evlist__sample_id_all(evlist))
|
||||
return perf_evlist__first(evlist);
|
||||
return evlist__first(evlist);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
@ -584,7 +482,7 @@ struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
|
|||
|
||||
sid = perf_evlist__id2sid(evlist, id);
|
||||
if (sid)
|
||||
return sid->evsel;
|
||||
return container_of(sid->evsel, struct evsel, core);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
@ -613,7 +511,7 @@ static int perf_evlist__event2id(struct evlist *evlist,
|
|||
struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
|
||||
union perf_event *event)
|
||||
{
|
||||
struct evsel *first = perf_evlist__first(evlist);
|
||||
struct evsel *first = evlist__first(evlist);
|
||||
struct hlist_head *head;
|
||||
struct perf_sample_id *sid;
|
||||
int hash;
|
||||
|
@ -634,11 +532,11 @@ struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
|
|||
return first;
|
||||
|
||||
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
|
||||
head = &evlist->heads[hash];
|
||||
head = &evlist->core.heads[hash];
|
||||
|
||||
hlist_for_each_entry(sid, head, node) {
|
||||
if (sid->id == id)
|
||||
return sid->evsel;
|
||||
return container_of(sid->evsel, struct evsel, core);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
@ -650,8 +548,8 @@ static int perf_evlist__set_paused(struct evlist *evlist, bool value)
|
|||
if (!evlist->overwrite_mmap)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < evlist->nr_mmaps; i++) {
|
||||
int fd = evlist->overwrite_mmap[i].fd;
|
||||
for (i = 0; i < evlist->core.nr_mmaps; i++) {
|
||||
int fd = evlist->overwrite_mmap[i].core.fd;
|
||||
int err;
|
||||
|
||||
if (fd < 0)
|
||||
|
@ -673,42 +571,42 @@ static int perf_evlist__resume(struct evlist *evlist)
|
|||
return perf_evlist__set_paused(evlist, false);
|
||||
}
|
||||
|
||||
static void perf_evlist__munmap_nofree(struct evlist *evlist)
|
||||
static void evlist__munmap_nofree(struct evlist *evlist)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (evlist->mmap)
|
||||
for (i = 0; i < evlist->nr_mmaps; i++)
|
||||
for (i = 0; i < evlist->core.nr_mmaps; i++)
|
||||
perf_mmap__munmap(&evlist->mmap[i]);
|
||||
|
||||
if (evlist->overwrite_mmap)
|
||||
for (i = 0; i < evlist->nr_mmaps; i++)
|
||||
for (i = 0; i < evlist->core.nr_mmaps; i++)
|
||||
perf_mmap__munmap(&evlist->overwrite_mmap[i]);
|
||||
}
|
||||
|
||||
void perf_evlist__munmap(struct evlist *evlist)
|
||||
void evlist__munmap(struct evlist *evlist)
|
||||
{
|
||||
perf_evlist__munmap_nofree(evlist);
|
||||
evlist__munmap_nofree(evlist);
|
||||
zfree(&evlist->mmap);
|
||||
zfree(&evlist->overwrite_mmap);
|
||||
}
|
||||
|
||||
static struct perf_mmap *perf_evlist__alloc_mmap(struct evlist *evlist,
|
||||
bool overwrite)
|
||||
static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
|
||||
bool overwrite)
|
||||
{
|
||||
int i;
|
||||
struct perf_mmap *map;
|
||||
struct mmap *map;
|
||||
|
||||
evlist->nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
|
||||
evlist->core.nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
|
||||
if (perf_cpu_map__empty(evlist->core.cpus))
|
||||
evlist->nr_mmaps = perf_thread_map__nr(evlist->core.threads);
|
||||
map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
|
||||
evlist->core.nr_mmaps = perf_thread_map__nr(evlist->core.threads);
|
||||
map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
|
||||
if (!map)
|
||||
return NULL;
|
||||
|
||||
for (i = 0; i < evlist->nr_mmaps; i++) {
|
||||
map[i].fd = -1;
|
||||
map[i].overwrite = overwrite;
|
||||
for (i = 0; i < evlist->core.nr_mmaps; i++) {
|
||||
map[i].core.fd = -1;
|
||||
map[i].core.overwrite = overwrite;
|
||||
/*
|
||||
* When the perf_mmap() call is made we grab one refcount, plus
|
||||
* one extra to let perf_mmap__consume() get the last
|
||||
|
@ -718,7 +616,7 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct evlist *evlist,
|
|||
* Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
|
||||
* thus does perf_mmap__get() on it.
|
||||
*/
|
||||
refcount_set(&map[i].refcnt, 0);
|
||||
refcount_set(&map[i].core.refcnt, 0);
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
@ -732,7 +630,7 @@ perf_evlist__should_poll(struct evlist *evlist __maybe_unused,
|
|||
return true;
|
||||
}
|
||||
|
||||
static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
|
||||
static int evlist__mmap_per_evsel(struct evlist *evlist, int idx,
|
||||
struct mmap_params *mp, int cpu_idx,
|
||||
int thread, int *_output, int *_output_overwrite)
|
||||
{
|
||||
|
@ -741,7 +639,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
|
|||
int evlist_cpu = cpu_map__cpu(evlist->core.cpus, cpu_idx);
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
struct perf_mmap *maps = evlist->mmap;
|
||||
struct mmap *maps = evlist->mmap;
|
||||
int *output = _output;
|
||||
int fd;
|
||||
int cpu;
|
||||
|
@ -752,7 +650,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
|
|||
maps = evlist->overwrite_mmap;
|
||||
|
||||
if (!maps) {
|
||||
maps = perf_evlist__alloc_mmap(evlist, true);
|
||||
maps = evlist__alloc_mmap(evlist, true);
|
||||
if (!maps)
|
||||
return -1;
|
||||
evlist->overwrite_mmap = maps;
|
||||
|
@ -762,7 +660,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
|
|||
mp->prot &= ~PROT_WRITE;
|
||||
}
|
||||
|
||||
if (evsel->system_wide && thread)
|
||||
if (evsel->core.system_wide && thread)
|
||||
continue;
|
||||
|
||||
cpu = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);
|
||||
|
@ -792,14 +690,14 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
|
|||
* other events, so it should not need to be polled anyway.
|
||||
* Therefore don't add it for polling.
|
||||
*/
|
||||
if (!evsel->system_wide &&
|
||||
__perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
|
||||
if (!evsel->core.system_wide &&
|
||||
perf_evlist__add_pollfd(&evlist->core, fd, &maps[idx], revent) < 0) {
|
||||
perf_mmap__put(&maps[idx]);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (evsel->core.attr.read_format & PERF_FORMAT_ID) {
|
||||
if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
|
||||
if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, cpu, thread,
|
||||
fd) < 0)
|
||||
return -1;
|
||||
perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
|
||||
|
@ -810,7 +708,7 @@ static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
|
||||
static int evlist__mmap_per_cpu(struct evlist *evlist,
|
||||
struct mmap_params *mp)
|
||||
{
|
||||
int cpu, thread;
|
||||
|
@ -826,7 +724,7 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
|
|||
true);
|
||||
|
||||
for (thread = 0; thread < nr_threads; thread++) {
|
||||
if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
|
||||
if (evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
|
||||
thread, &output, &output_overwrite))
|
||||
goto out_unmap;
|
||||
}
|
||||
|
@ -835,11 +733,11 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
|
|||
return 0;
|
||||
|
||||
out_unmap:
|
||||
perf_evlist__munmap_nofree(evlist);
|
||||
evlist__munmap_nofree(evlist);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int perf_evlist__mmap_per_thread(struct evlist *evlist,
|
||||
static int evlist__mmap_per_thread(struct evlist *evlist,
|
||||
struct mmap_params *mp)
|
||||
{
|
||||
int thread;
|
||||
|
@ -853,7 +751,7 @@ static int perf_evlist__mmap_per_thread(struct evlist *evlist,
|
|||
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
|
||||
false);
|
||||
|
||||
if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
|
||||
if (evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
|
||||
&output, &output_overwrite))
|
||||
goto out_unmap;
|
||||
}
|
||||
|
@ -861,7 +759,7 @@ static int perf_evlist__mmap_per_thread(struct evlist *evlist,
|
|||
return 0;
|
||||
|
||||
out_unmap:
|
||||
perf_evlist__munmap_nofree(evlist);
|
||||
evlist__munmap_nofree(evlist);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -888,7 +786,7 @@ unsigned long perf_event_mlock_kb_in_pages(void)
|
|||
return pages;
|
||||
}
|
||||
|
||||
size_t perf_evlist__mmap_size(unsigned long pages)
|
||||
size_t evlist__mmap_size(unsigned long pages)
|
||||
{
|
||||
if (pages == UINT_MAX)
|
||||
pages = perf_event_mlock_kb_in_pages();
|
||||
|
@ -971,7 +869,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
|
|||
}
|
||||
|
||||
/**
|
||||
* perf_evlist__mmap_ex - Create mmaps to receive events.
|
||||
* evlist__mmap_ex - Create mmaps to receive events.
|
||||
* @evlist: list of events
|
||||
* @pages: map length in pages
|
||||
* @overwrite: overwrite older events?
|
||||
|
@ -979,7 +877,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
|
|||
* @auxtrace_overwrite - overwrite older auxtrace data?
|
||||
*
|
||||
* If @overwrite is %false the user needs to signal event consumption using
|
||||
* perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
|
||||
* perf_mmap__write_tail(). Using evlist__mmap_read() does this
|
||||
* automatically.
|
||||
*
|
||||
* Similarly, if @auxtrace_overwrite is %false the user needs to signal data
|
||||
|
@ -987,7 +885,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
|
|||
*
|
||||
* Return: %0 on success, negative error code otherwise.
|
||||
*/
|
||||
int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
|
||||
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
|
||||
unsigned int auxtrace_pages,
|
||||
bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
|
||||
int comp_level)
|
||||
|
@ -1004,36 +902,36 @@ int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
|
|||
.comp_level = comp_level };
|
||||
|
||||
if (!evlist->mmap)
|
||||
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
|
||||
evlist->mmap = evlist__alloc_mmap(evlist, false);
|
||||
if (!evlist->mmap)
|
||||
return -ENOMEM;
|
||||
|
||||
if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
|
||||
if (evlist->core.pollfd.entries == NULL && perf_evlist__alloc_pollfd(&evlist->core) < 0)
|
||||
return -ENOMEM;
|
||||
|
||||
evlist->mmap_len = perf_evlist__mmap_size(pages);
|
||||
pr_debug("mmap size %zuB\n", evlist->mmap_len);
|
||||
mp.mask = evlist->mmap_len - page_size - 1;
|
||||
evlist->core.mmap_len = evlist__mmap_size(pages);
|
||||
pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
|
||||
mp.mask = evlist->core.mmap_len - page_size - 1;
|
||||
|
||||
auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
|
||||
auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
|
||||
auxtrace_pages, auxtrace_overwrite);
|
||||
|
||||
evlist__for_each_entry(evlist, evsel) {
|
||||
if ((evsel->core.attr.read_format & PERF_FORMAT_ID) &&
|
||||
evsel->sample_id == NULL &&
|
||||
perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
|
||||
evsel->core.sample_id == NULL &&
|
||||
perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr) < 0)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (perf_cpu_map__empty(cpus))
|
||||
return perf_evlist__mmap_per_thread(evlist, &mp);
|
||||
return evlist__mmap_per_thread(evlist, &mp);
|
||||
|
||||
return perf_evlist__mmap_per_cpu(evlist, &mp);
|
||||
return evlist__mmap_per_cpu(evlist, &mp);
|
||||
}
|
||||
|
||||
int perf_evlist__mmap(struct evlist *evlist, unsigned int pages)
|
||||
int evlist__mmap(struct evlist *evlist, unsigned int pages)
|
||||
{
|
||||
return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
|
||||
return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
|
||||
}

int perf_evlist__create_maps(struct evlist *evlist, struct target *target)

@@ -1225,7 +1123,7 @@ u64 perf_evlist__combined_branch_type(struct evlist *evlist)

bool perf_evlist__valid_read_format(struct evlist *evlist)
{
struct evsel *first = perf_evlist__first(evlist), *pos = first;
struct evsel *first = evlist__first(evlist), *pos = first;
u64 read_format = first->core.attr.read_format;
u64 sample_type = first->core.attr.sample_type;

@@ -1243,15 +1141,9 @@ bool perf_evlist__valid_read_format(struct evlist *evlist)
return true;
}

u64 perf_evlist__read_format(struct evlist *evlist)
{
struct evsel *first = perf_evlist__first(evlist);
return first->core.attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct evlist *evlist)
{
struct evsel *first = perf_evlist__first(evlist);
struct evsel *first = evlist__first(evlist);
struct perf_sample *data;
u64 sample_type;
u16 size = 0;

@@ -1284,7 +1176,7 @@ out:

bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
{
struct evsel *first = perf_evlist__first(evlist), *pos = first;
struct evsel *first = evlist__first(evlist), *pos = first;

evlist__for_each_entry_continue(evlist, pos) {
if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)

@@ -1296,7 +1188,7 @@ bool perf_evlist__valid_sample_id_all(struct evlist *evlist)

bool perf_evlist__sample_id_all(struct evlist *evlist)
{
struct evsel *first = perf_evlist__first(evlist);
struct evsel *first = evlist__first(evlist);
return first->core.attr.sample_id_all;
}

@@ -1529,19 +1421,6 @@ int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
}

size_t perf_evlist__fprintf(struct evlist *evlist, FILE *fp)
{
struct evsel *evsel;
size_t printed = 0;

evlist__for_each_entry(evlist, evsel) {
printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
perf_evsel__name(evsel));
}

return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_open(struct evlist *evlist,
int err, char *buf, size_t size)
{

@@ -1571,7 +1450,7 @@ int perf_evlist__strerror_open(struct evlist *evlist,
"Hint:\tThe current value is %d.", value);
break;
case EINVAL: {
struct evsel *first = perf_evlist__first(evlist);
struct evsel *first = evlist__first(evlist);
int max_freq;

if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)

@@ -1599,7 +1478,7 @@ out_default:
int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

switch (err) {
case EPERM:

@@ -1633,7 +1512,7 @@ void perf_evlist__to_front(struct evlist *evlist,
struct evsel *evsel, *n;
LIST_HEAD(move);

if (move_evsel == perf_evlist__first(evlist))
if (move_evsel == evlist__first(evlist))
return;

evlist__for_each_entry_safe(evlist, n, evsel) {

@@ -1754,7 +1633,7 @@ bool perf_evlist__exclude_kernel(struct evlist *evlist)
void perf_evlist__force_leader(struct evlist *evlist)
{
if (!evlist->nr_groups) {
struct evsel *leader = perf_evlist__first(evlist);
struct evsel *leader = evlist__first(evlist);

perf_evlist__set_leader(evlist);
leader->forced_leader = true;

@@ -1780,7 +1659,7 @@ struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
is_open = false;
if (c2->leader == leader) {
if (is_open)
evsel__close(c2);
perf_evsel__close(&evsel->core);
c2->leader = c2;
c2->core.nr_members = 0;
}

@@ -1844,10 +1723,10 @@ static void *perf_evlist__poll_thread(void *arg)
draining = true;

if (!draining)
perf_evlist__poll(evlist, 1000);
evlist__poll(evlist, 1000);

for (i = 0; i < evlist->nr_mmaps; i++) {
struct perf_mmap *map = &evlist->mmap[i];
for (i = 0; i < evlist->core.nr_mmaps; i++) {
struct mmap *map = &evlist->mmap[i];
union perf_event *event;

if (perf_mmap__read_init(map))

@@ -1889,7 +1768,7 @@ int perf_evlist__start_sb_thread(struct evlist *evlist,
goto out_delete_evlist;
}

if (perf_evlist__mmap(evlist, UINT_MAX))
if (evlist__mmap(evlist, UINT_MAX))
goto out_delete_evlist;

evlist__for_each_entry(evlist, counter) {
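
The hunks above also show data members migrating into the embedded libperf object: mmap_len, nr_mmaps and pollfd are now reached through evlist->core rather than directly off struct evlist. A tiny illustration, built only from the accesses visible above (print_mmap_setup is a hypothetical helper, not part of this patch):

    /* Hypothetical debug helper using the relocated fields. */
    static void print_mmap_setup(struct evlist *evlist)
    {
            pr_debug("mmap size %zuB across %d maps\n",
                     evlist->core.mmap_len, evlist->core.nr_mmaps);
    }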

@@ -7,11 +7,11 @@
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
#include <stdio.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include "events_stats.h"
#include "evsel.h"
#include "mmap.h"
#include <pthread.h>
#include <signal.h>
#include <unistd.h>

@@ -20,16 +20,38 @@ struct thread_map;
struct perf_cpu_map;
struct record_opts;

#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)

/*
* State machine of bkw_mmap_state:
*
* .________________(forbid)_____________.
* | V
* NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
* ^ ^ | ^ |
* | |__(forbid)____/ |___(forbid)___/|
* | |
* \_________________(3)_______________/
*
* NOTREADY : Backward ring buffers are not ready
* RUNNING : Backward ring buffers are recording
* DATA_PENDING : We are required to collect data from backward ring buffers
* EMPTY : We have collected data from backward ring buffers.
*
* (0): Setup backward ring buffer
* (1): Pause ring buffers for reading
* (2): Read from ring buffers
* (3): Resume ring buffers for recording
*/
enum bkw_mmap_state {
BKW_MMAP_NOTREADY,
BKW_MMAP_RUNNING,
BKW_MMAP_DATA_PENDING,
BKW_MMAP_EMPTY,
};

struct evlist {
struct perf_evlist core;
struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
int nr_groups;
int nr_mmaps;
bool enabled;
size_t mmap_len;
int id_pos;
int is_pos;
u64 combined_sample_type;

@@ -38,9 +60,8 @@ struct evlist {
int cork_fd;
pid_t pid;
} workload;
struct fdarray pollfd;
struct perf_mmap *mmap;
struct perf_mmap *overwrite_mmap;
struct mmap *mmap;
struct mmap *overwrite_mmap;
struct evsel *selected;
struct events_stats stats;
struct perf_env *env;

@@ -65,7 +86,7 @@ struct evlist *perf_evlist__new_default(void);
struct evlist *perf_evlist__new_dummy(void);
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
void perf_evlist__exit(struct evlist *evlist);
void evlist__exit(struct evlist *evlist);
void evlist__delete(struct evlist *evlist);

void evlist__add(struct evlist *evlist, struct evsel *entry);

@@ -119,17 +140,10 @@ struct evsel *
perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
const char *name);

void perf_evlist__id_add(struct evlist *evlist, struct evsel *evsel,
int cpu, int thread, u64 id);
int perf_evlist__id_add_fd(struct evlist *evlist,
struct evsel *evsel,
int cpu, int thread, int fd);
int evlist__add_pollfd(struct evlist *evlist, int fd);
int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask);

int perf_evlist__add_pollfd(struct evlist *evlist, int fd);
int perf_evlist__alloc_pollfd(struct evlist *evlist);
int perf_evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask);

int perf_evlist__poll(struct evlist *evlist, int timeout);
int evlist__poll(struct evlist *evlist, int timeout);

struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id);
struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,

@@ -139,7 +153,7 @@ struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id);

void perf_evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state);

void perf_evlist__mmap_consume(struct evlist *evlist, int idx);
void evlist__mmap_consume(struct evlist *evlist, int idx);

int evlist__open(struct evlist *evlist);
void evlist__close(struct evlist *evlist);
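
The backward-ring-buffer states documented above are driven through perf_evlist__toggle_bkw_mmap(), declared in the hunk just before this point. A sketch of the read cycle the comment describes, using only names from this header; the actual draining of the overwrite mmaps is elided and the exact sequencing in the perf tools may differ:

    /* Follow the documented transitions: pause (1), read (2), resume (3). */
    perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING); /* (1) pause for reading  */
    /* ... drain events from the backward/overwrite ring buffers ... */
    perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);        /* (2) data collected     */
    perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);      /* (3) resume recording   */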

@@ -170,14 +184,14 @@ int perf_evlist__parse_mmap_pages(const struct option *opt,

unsigned long perf_event_mlock_kb_in_pages(void);

int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
bool auxtrace_overwrite, int nr_cblocks,
int affinity, int flush, int comp_level);
int perf_evlist__mmap(struct evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct evlist *evlist);
int evlist__mmap(struct evlist *evlist, unsigned int pages);
void evlist__munmap(struct evlist *evlist);

size_t perf_evlist__mmap_size(unsigned long pages);
size_t evlist__mmap_size(unsigned long pages);

void evlist__disable(struct evlist *evlist);
void evlist__enable(struct evlist *evlist);

@@ -195,7 +209,6 @@ int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel);
void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct evlist *evlist);

u64 perf_evlist__read_format(struct evlist *evlist);
u64 __perf_evlist__combined_sample_type(struct evlist *evlist);
u64 perf_evlist__combined_sample_type(struct evlist *evlist);
u64 perf_evlist__combined_branch_type(struct evlist *evlist);

@@ -221,17 +234,19 @@ static inline bool perf_evlist__empty(struct evlist *evlist)
return list_empty(&evlist->core.entries);
}

static inline struct evsel *perf_evlist__first(struct evlist *evlist)
static inline struct evsel *evlist__first(struct evlist *evlist)
{
return list_entry(evlist->core.entries.next, struct evsel, core.node);
struct perf_evsel *evsel = perf_evlist__first(&evlist->core);

return container_of(evsel, struct evsel, core);
}

static inline struct evsel *perf_evlist__last(struct evlist *evlist)
static inline struct evsel *evlist__last(struct evlist *evlist)
{
return list_entry(evlist->core.entries.prev, struct evsel, core.node);
}
struct perf_evsel *evsel = perf_evlist__last(&evlist->core);

size_t perf_evlist__fprintf(struct evlist *evlist, FILE *fp);
return container_of(evsel, struct evsel, core);
}

int perf_evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size);
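
The rewritten evlist__first()/evlist__last() above no longer walk evlist->core.entries themselves: they ask libperf for the struct perf_evsel and recover the enclosing tools/perf struct evsel with container_of(). A stand-alone sketch of that embedding pattern with generic names (struct outer/struct core are illustrative, not the perf types, and this simplified container_of omits the kernel's type checking):

    #include <stddef.h>

    struct core  { int id; };
    struct outer { struct core core; const char *name; };

    /* Minimal container_of: step back from the embedded member to the wrapper. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct outer *outer_from_core(struct core *c)
    {
            return container_of(c, struct outer, core); /* same move as evlist__first() */
    }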

@@ -30,6 +30,8 @@
#include "counts.h"
#include "event.h"
#include "evsel.h"
#include "util/evsel_config.h"
#include "util/evsel_fprintf.h"
#include "evlist.h"
#include <perf/cpumap.h>
#include "thread_map.h"

@@ -1227,36 +1229,6 @@ int evsel__disable(struct evsel *evsel)
return err;
}

int perf_evsel__alloc_id(struct evsel *evsel, int ncpus, int nthreads)
{
if (ncpus == 0 || nthreads == 0)
return 0;

if (evsel->system_wide)
nthreads = 1;

evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
if (evsel->sample_id == NULL)
return -ENOMEM;

evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
if (evsel->id == NULL) {
xyarray__delete(evsel->sample_id);
evsel->sample_id = NULL;
return -ENOMEM;
}

return 0;
}

static void perf_evsel__free_id(struct evsel *evsel)
{
xyarray__delete(evsel->sample_id);
evsel->sample_id = NULL;
zfree(&evsel->id);
evsel->ids = 0;
}

static void perf_evsel__free_config_terms(struct evsel *evsel)
{
struct perf_evsel_config_term *term, *h;

@@ -1273,7 +1245,7 @@ void perf_evsel__exit(struct evsel *evsel)
assert(evsel->evlist == NULL);
perf_evsel__free_counts(evsel);
perf_evsel__free_fd(&evsel->core);
perf_evsel__free_id(evsel);
perf_evsel__free_id(&evsel->core);
perf_evsel__free_config_terms(evsel);
cgroup__put(evsel->cgrp);
perf_cpu_map__put(evsel->core.cpus);
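
perf_evsel__alloc_id() and perf_evsel__free_id() disappear from this file because the sample_id/id/ids bookkeeping now lives in the libperf perf_evsel; the surviving tools/perf callers pass &evsel->core, as the updated perf_evsel__exit() above and the call sites elsewhere in this diff do. A sketch of the resulting call pattern (setup_ids is a hypothetical wrapper, error handling illustrative):

    /* Hypothetical wrapper: allocate the per-fd ID arrays on the libperf side
     * of an evsel; they are released later via perf_evsel__free_id(&evsel->core). */
    static int setup_ids(struct evsel *evsel, int ncpus, int nthreads)
    {
            if (perf_evsel__alloc_id(&evsel->core, ncpus, nthreads) < 0)
                    return -ENOMEM;
            return 0;
    }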

@@ -1473,152 +1445,6 @@ static int get_group_fd(struct evsel *evsel, int cpu, int thread)
return fd;
}

struct bit_names {
int bit;
const char *name;
};

static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
{
bool first_bit = true;
int i = 0;

do {
if (value & bits[i].bit) {
buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
first_bit = false;
}
} while (bits[++i].name != NULL);
}

static void __p_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
struct bit_names bits[] = {
bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
bit_name(WEIGHT), bit_name(PHYS_ADDR),
{ .name = NULL, }
};
#undef bit_name
__p_bits(buf, size, value, bits);
}

static void __p_branch_sample_type(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
struct bit_names bits[] = {
bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
{ .name = NULL, }
};
#undef bit_name
__p_bits(buf, size, value, bits);
}

static void __p_read_format(char *buf, size_t size, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
struct bit_names bits[] = {
bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
bit_name(ID), bit_name(GROUP),
{ .name = NULL, }
};
#undef bit_name
__p_bits(buf, size, value, bits);
}

#define BUF_SIZE 1024

#define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
#define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
#define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
#define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)

#define PRINT_ATTRn(_n, _f, _p) \
do { \
if (attr->_f) { \
_p(attr->_f); \
ret += attr__fprintf(fp, _n, buf, priv);\
} \
} while (0)

#define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p)

int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
attr__fprintf_f attr__fprintf, void *priv)
{
char buf[BUF_SIZE];
int ret = 0;

PRINT_ATTRf(type, p_unsigned);
PRINT_ATTRf(size, p_unsigned);
PRINT_ATTRf(config, p_hex);
PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
PRINT_ATTRf(sample_type, p_sample_type);
PRINT_ATTRf(read_format, p_read_format);

PRINT_ATTRf(disabled, p_unsigned);
PRINT_ATTRf(inherit, p_unsigned);
PRINT_ATTRf(pinned, p_unsigned);
PRINT_ATTRf(exclusive, p_unsigned);
PRINT_ATTRf(exclude_user, p_unsigned);
PRINT_ATTRf(exclude_kernel, p_unsigned);
PRINT_ATTRf(exclude_hv, p_unsigned);
PRINT_ATTRf(exclude_idle, p_unsigned);
PRINT_ATTRf(mmap, p_unsigned);
PRINT_ATTRf(comm, p_unsigned);
PRINT_ATTRf(freq, p_unsigned);
PRINT_ATTRf(inherit_stat, p_unsigned);
PRINT_ATTRf(enable_on_exec, p_unsigned);
PRINT_ATTRf(task, p_unsigned);
PRINT_ATTRf(watermark, p_unsigned);
PRINT_ATTRf(precise_ip, p_unsigned);
PRINT_ATTRf(mmap_data, p_unsigned);
PRINT_ATTRf(sample_id_all, p_unsigned);
PRINT_ATTRf(exclude_host, p_unsigned);
PRINT_ATTRf(exclude_guest, p_unsigned);
PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
PRINT_ATTRf(exclude_callchain_user, p_unsigned);
PRINT_ATTRf(mmap2, p_unsigned);
PRINT_ATTRf(comm_exec, p_unsigned);
PRINT_ATTRf(use_clockid, p_unsigned);
PRINT_ATTRf(context_switch, p_unsigned);
PRINT_ATTRf(write_backward, p_unsigned);
PRINT_ATTRf(namespaces, p_unsigned);
PRINT_ATTRf(ksymbol, p_unsigned);
PRINT_ATTRf(bpf_event, p_unsigned);
PRINT_ATTRf(aux_output, p_unsigned);

PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
PRINT_ATTRf(bp_type, p_unsigned);
PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
PRINT_ATTRf(sample_regs_user, p_hex);
PRINT_ATTRf(sample_stack_user, p_unsigned);
PRINT_ATTRf(clockid, p_signed);
PRINT_ATTRf(sample_regs_intr, p_hex);
PRINT_ATTRf(aux_watermark, p_unsigned);
PRINT_ATTRf(sample_max_stack, p_unsigned);

return ret;
}

static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
void *priv __maybe_unused)
{
return fprintf(fp, " %-32s %s\n", name, val);
}

static void perf_evsel__remove_fd(struct evsel *pos,
int nr_cpus, int nr_threads,
int thread_idx)
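
perf_event_attr__fprintf() and its PRINT_ATTRf()/bit_names machinery leave evsel.c in the hunk above; the declaration and the attr__fprintf_f callback type remain visible in the evsel.h portion further down. A minimal sketch of supplying the callback, modelled on __open_attr__fprintf() above (print_field and dump_attr are illustrative names, not from this patch):

    /* Callback invoked once per set perf_event_attr field, with the field
     * name and its already-formatted value. */
    static int print_field(FILE *fp, const char *name, const char *val,
                           void *priv __maybe_unused)
    {
            return fprintf(fp, "  %-32s %s\n", name, val);
    }

    static void dump_attr(FILE *fp, struct perf_event_attr *attr)
    {
            perf_event_attr__fprintf(fp, attr, print_field, NULL);
    }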

@@ -1663,7 +1489,7 @@ static bool ignore_missing_thread(struct evsel *evsel,
return false;

/* The system wide setup does not work with threads. */
if (evsel->system_wide)
if (evsel->core.system_wide)
return false;

/* The -ESRCH is perf event syscall errno for pid's not found. */

@@ -1689,6 +1515,12 @@ static bool ignore_missing_thread(struct evsel *evsel,
return true;
}

static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
void *priv __maybe_unused)
{
return fprintf(fp, " %-32s %s\n", name, val);
}

static void display_attr(struct perf_event_attr *attr)
{
if (verbose >= 2) {

@@ -1772,7 +1604,7 @@ int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
threads = empty_thread_map;
}

if (evsel->system_wide)
if (evsel->core.system_wide)
nthreads = 1;
else
nthreads = threads->nr;

@@ -1819,7 +1651,7 @@ retry_sample_id:
for (thread = 0; thread < nthreads; thread++) {
int fd, group_fd;

if (!evsel->cgrp && !evsel->system_wide)
if (!evsel->cgrp && !evsel->core.system_wide)
pid = perf_thread_map__pid(threads, thread);

group_fd = get_group_fd(evsel, cpu, thread);

@@ -1992,7 +1824,7 @@ out_close:
void evsel__close(struct evsel *evsel)
{
perf_evsel__close(&evsel->core);
perf_evsel__free_id(evsel);
perf_evsel__free_id(&evsel->core);
}

int perf_evsel__open_per_cpu(struct evsel *evsel,

@@ -2535,9 +2367,11 @@ bool perf_evsel__fallback(struct evsel *evsel, int err,
if (evsel->name)
free(evsel->name);
evsel->name = new_name;
scnprintf(msg, msgsize,
"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying "
"to fall back to excluding kernel and hypervisor "
" samples", paranoid);
evsel->core.attr.exclude_kernel = 1;
evsel->core.attr.exclude_hv = 1;

return true;
}
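
The fallback above now widens what it excludes: when perf_event_open() is refused under a restrictive kernel.perf_event_paranoid setting, the retry drops hypervisor as well as kernel samples. The attribute side of that, in isolation (exclude_privileged is a hypothetical helper; the sysctl read and the retry itself sit outside this hunk):

    /* What the fallback path sets on the event before retrying. */
    static void exclude_privileged(struct perf_event_attr *attr)
    {
            attr->exclude_kernel = 1;   /* no kernel-level samples */
            attr->exclude_hv     = 1;   /* no hypervisor-level samples either */
    }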

@@ -2690,7 +2524,7 @@ static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
thread++) {
int fd = FD(evsel, cpu, thread);

if (perf_evlist__id_add_fd(evlist, evsel,
if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
cpu, thread, fd) < 0)
return -1;
}

@@ -2704,7 +2538,7 @@ int perf_evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
struct perf_cpu_map *cpus = evsel->core.cpus;
struct perf_thread_map *threads = evsel->core.threads;

if (perf_evsel__alloc_id(evsel, cpus->nr, threads->nr))
if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr))
return -ENOMEM;

return store_evsel_ids(evsel, evlist);

@@ -4,7 +4,6 @@

#include <linux/list.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>
#include <linux/perf_event.h>
#include <linux/types.h>

@@ -13,79 +12,11 @@
#include "symbol_conf.h"
#include <internal/cpumap.h>

struct addr_location;
struct evsel;
union perf_event;

/*
* Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
* more than one entry in the evlist.
*/
struct perf_sample_id {
struct hlist_node node;
u64 id;
struct evsel *evsel;
/*
* 'idx' will be used for AUX area sampling. A sample will have AUX area
* data that will be queued for decoding, where there are separate
* queues for each CPU (per-cpu tracing) or task (per-thread tracing).
* The sample ID can be used to lookup 'idx' which is effectively the
* queue number.
*/
int idx;
int cpu;
pid_t tid;

/* Holds total ID period value for PERF_SAMPLE_READ processing. */
u64 period;
};

struct bpf_object;
struct cgroup;

/*
* The 'struct perf_evsel_config_term' is used to pass event
* specific configuration data to perf_evsel__config routine.
* It is allocated within event parsing and attached to
* perf_evsel::config_terms list head.
*/
enum term_type {
PERF_EVSEL__CONFIG_TERM_PERIOD,
PERF_EVSEL__CONFIG_TERM_FREQ,
PERF_EVSEL__CONFIG_TERM_TIME,
PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
PERF_EVSEL__CONFIG_TERM_STACK_USER,
PERF_EVSEL__CONFIG_TERM_INHERIT,
PERF_EVSEL__CONFIG_TERM_MAX_STACK,
PERF_EVSEL__CONFIG_TERM_MAX_EVENTS,
PERF_EVSEL__CONFIG_TERM_OVERWRITE,
PERF_EVSEL__CONFIG_TERM_DRV_CFG,
PERF_EVSEL__CONFIG_TERM_BRANCH,
PERF_EVSEL__CONFIG_TERM_PERCORE,
PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT,
};

struct perf_evsel_config_term {
struct list_head list;
enum term_type type;
union {
u64 period;
u64 freq;
bool time;
char *callgraph;
char *drv_cfg;
u64 stack_user;
int max_stack;
bool inherit;
bool overwrite;
char *branch;
unsigned long max_events;
bool percore;
bool aux_output;
} val;
bool weak;
};

struct perf_counts;
struct perf_stat_evsel;
union perf_event;

typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data);

@@ -94,10 +25,6 @@ enum perf_tool_event {
PERF_TOOL_DURATION_TIME = 1,
};

struct bpf_object;
struct perf_counts;
struct xyarray;

/** struct evsel - event selector
*
* @evlist - evlist this evsel is in, if it is in one.

@@ -117,12 +44,9 @@ struct evsel {
struct perf_evsel core;
struct evlist *evlist;
char *filter;
struct xyarray *sample_id;
u64 *id;
struct perf_counts *counts;
struct perf_counts *prev_raw_counts;
int idx;
u32 ids;
unsigned long max_events;
unsigned long nr_events_printed;
char *name;

@@ -146,7 +70,6 @@ struct evsel {
bool disabled;
bool no_aux_samples;
bool immediate;
bool system_wide;
bool tracking;
bool per_pkg;
bool precise_max;

@@ -277,8 +200,6 @@ const char *perf_evsel__name(struct evsel *evsel);
const char *perf_evsel__group_name(struct evsel *evsel);
int perf_evsel__group_desc(struct evsel *evsel, char *buf, size_t size);

int perf_evsel__alloc_id(struct evsel *evsel, int ncpus, int nthreads);

void __perf_evsel__set_sample_bit(struct evsel *evsel,
enum perf_event_sample_format bit);
void __perf_evsel__reset_sample_bit(struct evsel *evsel,

@@ -434,37 +355,6 @@ static inline bool perf_evsel__is_clock(struct evsel *evsel)
perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}

struct perf_attr_details {
bool freq;
bool verbose;
bool event_group;
bool force;
bool trace_fields;
};

int perf_evsel__fprintf(struct evsel *evsel,
struct perf_attr_details *details, FILE *fp);

#define EVSEL__PRINT_IP (1<<0)
#define EVSEL__PRINT_SYM (1<<1)
#define EVSEL__PRINT_DSO (1<<2)
#define EVSEL__PRINT_SYMOFFSET (1<<3)
#define EVSEL__PRINT_ONELINE (1<<4)
#define EVSEL__PRINT_SRCLINE (1<<5)
#define EVSEL__PRINT_UNKNOWN_AS_ADDR (1<<6)
#define EVSEL__PRINT_CALLCHAIN_ARROW (1<<7)
#define EVSEL__PRINT_SKIP_IGNORED (1<<8)

struct callchain_cursor;

int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
unsigned int print_opts,
struct callchain_cursor *cursor, FILE *fp);

int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
int left_alignment, unsigned int print_opts,
struct callchain_cursor *cursor, FILE *fp);

bool perf_evsel__fallback(struct evsel *evsel, int err,
char *msg, size_t msgsize);
int perf_evsel__open_strerror(struct evsel *evsel, struct target *target,
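
The EVSEL__PRINT_* bits and the sample__fprintf_*() helpers above move out of evsel.h, presumably into the util/evsel_fprintf.h header that evsel.c starts including earlier in this diff. They are plain flags OR-ed into the print_opts argument; a small sketch of composing them, assuming a struct perf_sample *sample, struct addr_location *al and struct callchain_cursor *cursor are already in scope:

    /* Resolve symbol and DSO, print one line per entry. */
    unsigned int print_opts = EVSEL__PRINT_IP | EVSEL__PRINT_SYM |
                              EVSEL__PRINT_DSO | EVSEL__PRINT_ONELINE;

    sample__fprintf_sym(sample, al, 0, print_opts, cursor, stdout);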

@@ -497,11 +387,6 @@ static inline bool evsel__has_callchain(const struct evsel *evsel)
return (evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0;
}

typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);

int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
attr__fprintf_f attr__fprintf, void *priv);

struct perf_env *perf_evsel__env(struct evsel *evsel);

int perf_evsel__store_ids(struct evsel *evsel, struct evlist *evlist);