OpenCloudOS-Kernel/tools/perf/util/intel-pt.c


// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_pt.c: Intel Processor Trace support
* Copyright (c) 2013-2015, Intel Corporation.
*/
#include <inttypes.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include "session.h"
#include "machine.h"
#include "memswap.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "time-utils.h"
#include "../arch/x86/include/uapi/asm/perf_regs.h"
#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"
#define MAX_TIMESTAMP (~0ULL)
#define INTEL_PT_CFG_PASS_THRU BIT_ULL(0)
#define INTEL_PT_CFG_PWR_EVT_EN BIT_ULL(4)
#define INTEL_PT_CFG_BRANCH_EN BIT_ULL(13)
#define INTEL_PT_CFG_EVT_EN BIT_ULL(31)
#define INTEL_PT_CFG_TNT_DIS BIT_ULL(55)
struct range {
u64 start;
u64 end;
};
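/* Per-session Intel PT decoding state. */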
struct intel_pt {
struct auxtrace auxtrace;
struct auxtrace_queues queues;
struct auxtrace_heap heap;
u32 auxtrace_type;
struct perf_session *session;
struct machine *machine;
struct evsel *switch_evsel;
struct thread *unknown_thread;
bool timeless_decoding;
bool sampling_mode;
bool snapshot_mode;
bool per_cpu_mmaps;
bool have_tsc;
bool data_queued;
bool est_tsc;
bool sync_switch;
bool sync_switch_not_supported;
bool mispred_all;
bool use_thread_stack;
bool callstack;
bool cap_event_trace;
bool have_guest_sideband;
unsigned int br_stack_sz;
unsigned int br_stack_sz_plus;
int have_sched_switch;
u32 pmu_type;
u64 kernel_start;
u64 switch_ip;
u64 ptss_ip;
u64 first_timestamp;
struct perf_tsc_conversion tc;
bool cap_user_time_zero;
struct itrace_synth_opts synth_opts;
bool sample_instructions;
u64 instructions_sample_type;
u64 instructions_id;
bool sample_cycles;
u64 cycles_sample_type;
u64 cycles_id;
bool sample_branches;
u32 branches_filter;
u64 branches_sample_type;
u64 branches_id;
bool sample_transactions;
u64 transactions_sample_type;
u64 transactions_id;
bool sample_ptwrites;
u64 ptwrites_sample_type;
u64 ptwrites_id;
bool sample_pwr_events;
u64 pwr_events_sample_type;
u64 mwait_id;
u64 pwre_id;
u64 exstop_id;
u64 pwrx_id;
u64 cbr_id;
u64 psb_id;
bool single_pebs;
bool sample_pebs;
struct evsel *pebs_evsel;
u64 evt_sample_type;
u64 evt_id;
u64 iflag_chg_sample_type;
u64 iflag_chg_id;
u64 tsc_bit;
u64 mtc_bit;
u64 mtc_freq_bits;
u32 tsc_ctc_ratio_n;
u32 tsc_ctc_ratio_d;
u64 cyc_bit;
u64 noretcomp_bit;
unsigned max_non_turbo_ratio;
unsigned cbr2khz;
int max_loops;
unsigned long num_events;
char *filter;
struct addr_filters filts;
struct range *time_ranges;
unsigned int range_cnt;
struct ip_callchain *chain;
struct branch_stack *br_stack;
u64 dflt_tsc_offset;
struct rb_root vmcs_info;
};
enum switch_state {
INTEL_PT_SS_NOT_TRACING,
INTEL_PT_SS_UNKNOWN,
INTEL_PT_SS_TRACING,
INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
INTEL_PT_SS_EXPECTING_SWITCH_IP,
};
/* applicable_counters is 64-bits */
#define INTEL_PT_MAX_PEBS 64
struct intel_pt_pebs_event {
struct evsel *evsel;
u64 id;
};
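/*
 * Decode state for one auxtrace queue (one queue per CPU with per-cpu
 * mmaps, otherwise one per thread).
 */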
struct intel_pt_queue {
struct intel_pt *pt;
unsigned int queue_nr;
struct auxtrace_buffer *buffer;
struct auxtrace_buffer *old_buffer;
void *decoder;
const struct intel_pt_state *state;
struct ip_callchain *chain;
struct branch_stack *last_branch;
union perf_event *event_buf;
bool on_heap;
bool stop;
bool step_through_buffers;
bool use_buffer_pid_tid;
bool sync_switch;
bool sample_ipc;
pid_t pid, tid;
int cpu;
int switch_state;
pid_t next_tid;
struct thread *thread;
struct machine *guest_machine;
struct thread *guest_thread;
struct thread *unknown_guest_thread;
pid_t guest_machine_pid;
pid_t guest_pid;
pid_t guest_tid;
int vcpu;
bool exclude_kernel;
bool have_sample;
u64 time;
u64 timestamp;
u64 sel_timestamp;
bool sel_start;
unsigned int sel_idx;
u32 flags;
u16 insn_len;
u64 last_insn_cnt;
u64 ipc_insn_cnt;
u64 ipc_cyc_cnt;
u64 last_in_insn_cnt;
u64 last_in_cyc_cnt;
u64 last_cy_insn_cnt;
u64 last_cy_cyc_cnt;
u64 last_br_insn_cnt;
u64 last_br_cyc_cnt;
unsigned int cbr_seen;
char insn[INTEL_PT_INSN_BUF_SZ];
struct intel_pt_pebs_event pebs[INTEL_PT_MAX_PEBS];
};
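/*
 * Hex dump raw Intel PT trace data one packet per line, appending a decoded
 * description of each packet that the packet decoder recognizes.
 */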
static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;
	enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet, &ctx);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		printf(".");
		color_fprintf(stdout, color, " %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, " ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}
static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
size_t len)
{
printf(".\n");
intel_pt_dump(pt, buf, len);
}
static void intel_pt_log_event(union perf_event *event)
{
FILE *f = intel_pt_log_fp();
if (!intel_pt_enable_logging || !f)
return;
perf_event__fprintf(event, NULL, f);
}
static void intel_pt_dump_sample(struct perf_session *session,
struct perf_sample *sample)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
printf("\n");
intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
}
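/*
 * Decide whether perf events should be written to the intel_pt debug log,
 * based on the log plus/minus flags and any requested time ranges.
 */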
static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
{
struct perf_time_interval *range = pt->synth_opts.ptime_range;
int n = pt->synth_opts.range_num;
if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
return true;
if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
return false;
/* perf_time__ranges_skip_sample does not work if time is zero */
if (!tm)
tm = 1;
return !n || !perf_time__ranges_skip_sample(range, n, tm);
}
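/*
 * Find the rb-tree node for a given VMCS, or allocate and insert a new one
 * initialized with the default TSC offset.
 */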
static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root,
							 u64 vmcs,
							 u64 dflt_tsc_offset)
{
	struct rb_node **p = &rb_root->rb_node;
	struct rb_node *parent = NULL;
	struct intel_pt_vmcs_info *v;

	while (*p) {
		parent = *p;
		v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node);

		if (v->vmcs == vmcs)
			return v;

		if (vmcs < v->vmcs)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	v = zalloc(sizeof(*v));
	if (v) {
		v->vmcs = vmcs;
		v->tsc_offset = dflt_tsc_offset;
		v->reliable = dflt_tsc_offset;
		rb_link_node(&v->rb_node, parent, p);
		rb_insert_color(&v->rb_node, rb_root);
	}
	return v;
}
static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs)
{
struct intel_pt_queue *ptq = data;
struct intel_pt *pt = ptq->pt;
if (!vmcs && !pt->dflt_tsc_offset)
return NULL;
return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset);
}
static void intel_pt_free_vmcs_info(struct intel_pt *pt)
{
struct intel_pt_vmcs_info *v;
struct rb_node *n;
n = rb_first(&pt->vmcs_info);
while (n) {
v = rb_entry(n, struct intel_pt_vmcs_info, rb_node);
n = rb_next(n);
rb_erase(&v->rb_node, &pt->vmcs_info);
free(v);
}
}
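/*
 * Trim the start of buffer 'b' so that it does not repeat data already seen
 * at the end of buffer 'a'.  Overlaps can occur in snapshot and sampling
 * modes.
 */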
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
struct auxtrace_buffer *b)
{
bool consecutive = false;
void *start;
start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
pt->have_tsc, &consecutive,
pt->synth_opts.vm_time_correlation);
if (!start)
return -EINVAL;
/*
* In the case of vm_time_correlation, the overlap might contain TSC
* packets that will not be fixed, and that will then no longer work for
* overlap detection. Avoid that by zeroing out the overlap.
*/
if (pt->synth_opts.vm_time_correlation)
memset(b->data, 0, start - b->data);
b->use_size = b->data + b->size - start;
b->use_data = start;
if (b->use_size && consecutive)
b->consecutive = true;
return 0;
}
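/*
 * Map the buffer data if necessary, fix any overlap with the previous
 * buffer, and fill in the intel_pt_buffer passed to the decoder.  A new
 * trace_nr is used whenever the data is not consecutive with the previous
 * buffer.
 */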
static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
			       struct auxtrace_buffer *buffer,
			       struct auxtrace_buffer *old_buffer,
			       struct intel_pt_buffer *b)
{
	bool might_overlap;

	if (!buffer->data) {
		int fd = perf_data__fd(ptq->pt->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
	if (might_overlap && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || (might_overlap && !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	return 0;
}
/* Do not drop buffers with references - refer intel_pt_get_trace() */
static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
struct auxtrace_buffer *buffer)
{
if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
return;
auxtrace_buffer__drop_data(buffer);
}
/* Must be serialized with respect to intel_pt_get_trace() */
static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
			      void *cb_data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err = 0;

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	while (1) {
		struct intel_pt_buffer b = { .len = 0 };

		buffer = auxtrace_buffer__next(queue, buffer);
		if (!buffer)
			break;

		err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
		if (err)
			break;

		if (b.len) {
			intel_pt_lookahead_drop_buffer(ptq, old_buffer);
			old_buffer = buffer;
		} else {
			intel_pt_lookahead_drop_buffer(ptq, buffer);
			continue;
		}

		err = cb(&b, cb_data);
		if (err)
			break;
	}

	if (buffer != old_buffer)
		intel_pt_lookahead_drop_buffer(ptq, buffer);
	intel_pt_lookahead_drop_buffer(ptq, old_buffer);

	return err;
}
/*
* This function assumes data is processed sequentially only.
* Must be serialized with respect to intel_pt_lookahead()
*/
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err;

	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
	if (err)
		return err;

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (b->len) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		ptq->old_buffer = buffer;
	} else {
		auxtrace_buffer__drop_data(buffer);
		return intel_pt_get_trace(b, data);
	}

	return 0;
}
struct intel_pt_cache_entry {
struct auxtrace_cache_entry entry;
u64 insn_cnt;
u64 byte_cnt;
enum intel_pt_insn_op op;
enum intel_pt_insn_branch branch;
bool emulated_ptwrite;
int length;
int32_t rel;
char insn[INTEL_PT_INSN_BUF_SZ];
};
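/*
 * The per-dso instruction cache is sized from the DSO data size divided by
 * a divisor, which can be overridden with the "intel-pt.cache-divisor" perf
 * config variable (default 64).
 */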
static int intel_pt_config_div(const char *var, const char *value, void *data)
{
int *d = data;
long val;
if (!strcmp(var, "intel-pt.cache-divisor")) {
val = strtol(value, NULL, 0);
if (val > 0 && val <= INT_MAX)
*d = val;
}
return 0;
}
static int intel_pt_cache_divisor(void)
{
static int d;
if (d)
return d;
perf_config(intel_pt_config_div, &d);
if (!d)
d = 64;
return d;
}
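/* Number of hash table bits for the per-dso cache, based on the DSO data size. */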
static unsigned int intel_pt_cache_size(struct dso *dso,
struct machine *machine)
{
off_t size;
size = dso__data_size(dso, machine);
size /= intel_pt_cache_divisor();
if (size < 1000)
return 10;
if (size > (1 << 21))
return 21;
return 32 - __builtin_clz(size);
}
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
struct machine *machine)
{
struct auxtrace_cache *c;
unsigned int bits;
if (dso->auxtrace_cache)
return dso->auxtrace_cache;
bits = intel_pt_cache_size(dso, machine);
/* Ignoring cache creation failure */
c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);
dso->auxtrace_cache = c;
return c;
}
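/*
 * Cache the decode result for the instruction at 'offset' in 'dso', along
 * with the instruction and byte counts accumulated while walking to it.
 */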
static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
u64 offset, u64 insn_cnt, u64 byte_cnt,
struct intel_pt_insn *intel_pt_insn)
{
struct auxtrace_cache *c = intel_pt_cache(dso, machine);
struct intel_pt_cache_entry *e;
int err;
if (!c)
return -ENOMEM;
e = auxtrace_cache__alloc_entry(c);
if (!e)
return -ENOMEM;
e->insn_cnt = insn_cnt;
e->byte_cnt = byte_cnt;
e->op = intel_pt_insn->op;
e->branch = intel_pt_insn->branch;
e->emulated_ptwrite = intel_pt_insn->emulated_ptwrite;
e->length = intel_pt_insn->length;
e->rel = intel_pt_insn->rel;
memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);
err = auxtrace_cache__add(c, offset, &e->entry);
if (err)
auxtrace_cache__free_entry(c, e);
return err;
}
static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
struct auxtrace_cache *c = intel_pt_cache(dso, machine);
if (!c)
return NULL;
return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}
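/* Remove a cache entry, e.g. when a text poke modifies the code at 'offset'. */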
static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
u64 offset)
{
struct auxtrace_cache *c = intel_pt_cache(dso, machine);
if (!c)
return;
auxtrace_cache__remove(dso->auxtrace_cache, offset);
}
static inline bool intel_pt_guest_kernel_ip(uint64_t ip)
{
/* Assumes 64-bit kernel */
return ip & (1ULL << 63);
}
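/* Map an ip to a PERF_RECORD_MISC_* cpumode, taking guest vs. host (nr) into account. */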
static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr)
{
if (nr) {
return intel_pt_guest_kernel_ip(ip) ?
PERF_RECORD_MISC_GUEST_KERNEL :
PERF_RECORD_MISC_GUEST_USER;
}
return ip >= ptq->pt->kernel_start ?
PERF_RECORD_MISC_KERNEL :
PERF_RECORD_MISC_USER;
}
static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip)
{
/* No support for non-zero CS base */
if (from_ip)
return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr);
return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr);
}
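/*
 * Find (or create) the guest machine for the current pid and cache it on the
 * queue together with its idle thread, which stands in when the guest thread
 * is unknown.  With --guest-code, also set up a guest_code thread.
 */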
static int intel_pt_get_guest(struct intel_pt_queue *ptq)
{
struct machines *machines = &ptq->pt->session->machines;
struct machine *machine;
pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;
if (ptq->guest_machine && pid == ptq->guest_machine->pid)
return 0;
ptq->guest_machine = NULL;
thread__zput(ptq->unknown_guest_thread);
if (symbol_conf.guest_code) {
thread__zput(ptq->guest_thread);
ptq->guest_thread = machines__findnew_guest_code(machines, pid);
}
machine = machines__find_guest(machines, pid);
if (!machine)
return -1;
ptq->unknown_guest_thread = machine__idle_thread(machine);
if (!ptq->unknown_guest_thread)
return -1;
ptq->guest_machine = machine;
return 0;
}
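/* An unconditional jump with a 16-byte displacement - the form used to branch over an emulated ptwrite payload. */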
static inline bool intel_pt_jmp_16(struct intel_pt_insn *intel_pt_insn)
{
return intel_pt_insn->rel == 16 && intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL;
}
#define PTWRITE_MAGIC "\x0f\x0bperf,ptwrite "
#define PTWRITE_MAGIC_LEN 16
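/*
 * An emulated ptwrite starts with a ud2 instruction (0x0f 0x0b) followed by
 * the magic string above.  Check the object code at 'offset' for that
 * signature.
 */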
static bool intel_pt_emulated_ptwrite(struct dso *dso, struct machine *machine, u64 offset)
{
unsigned char buf[PTWRITE_MAGIC_LEN];
ssize_t len;
len = dso__data_read_offset(dso, machine, offset, buf, PTWRITE_MAGIC_LEN);
if (len == PTWRITE_MAGIC_LEN && !memcmp(buf, PTWRITE_MAGIC, PTWRITE_MAGIC_LEN)) {
intel_pt_log("Emulated ptwrite signature found\n");
return true;
}
intel_pt_log("Emulated ptwrite signature not found\n");
return false;
}
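/*
 * Decoder callback to walk object code from *ip, decoding instructions until
 * 'to_ip' or 'max_insn_cnt' is reached, or a branch is found.  Decode results
 * are cached per dso to avoid repeatedly disassembling the same code.
 */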
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
uint64_t *insn_cnt_ptr, uint64_t *ip,
uint64_t to_ip, uint64_t max_insn_cnt,
void *data)
{
struct intel_pt_queue *ptq = data;
struct machine *machine = ptq->pt->machine;
struct thread *thread;
struct addr_location al;
unsigned char buf[INTEL_PT_INSN_BUF_SZ];
ssize_t len;
int x86_64, ret = 0;
u8 cpumode;
u64 offset, start_offset, start_ip;
u64 insn_cnt = 0;
bool one_map = true;
bool nr;
addr_location__init(&al);
intel_pt_insn->length = 0;
if (to_ip && *ip == to_ip)
goto out_no_cache;
nr = ptq->state->to_nr;
cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);
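/* 'nr' (non-root) means the branch target is in a guest, so use the guest machine and thread */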
if (nr) {
if (ptq->pt->have_guest_sideband) {
if (!ptq->guest_machine || ptq->guest_machine_pid != ptq->pid) {
intel_pt_log("ERROR: guest sideband but no guest machine\n");
ret = -EINVAL;
goto out_ret;
}
} else if ((!symbol_conf.guest_code && cpumode != PERF_RECORD_MISC_GUEST_KERNEL) ||
intel_pt_get_guest(ptq)) {
intel_pt_log("ERROR: no guest machine\n");
ret = -EINVAL;
goto out_ret;
}
machine = ptq->guest_machine;
thread = ptq->guest_thread;
if (!thread) {
if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL) {
intel_pt_log("ERROR: no guest thread\n");
ret = -EINVAL;
goto out_ret;
}
thread = ptq->unknown_guest_thread;
}
} else {
thread = ptq->thread;
if (!thread) {
if (cpumode != PERF_RECORD_MISC_KERNEL) {
intel_pt_log("ERROR: no thread\n");
ret = -EINVAL;
goto out_ret;
}
thread = ptq->pt->unknown_thread;
}
}
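/* Walk one map at a time: find the map containing *ip, then decode within its dso */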
while (1) {
struct dso *dso;
if (!thread__find_map(thread, cpumode, *ip, &al) || !map__dso(al.map)) {
if (al.map)
intel_pt_log("ERROR: thread has no dso for %#" PRIx64 "\n", *ip);
else
intel_pt_log("ERROR: thread has no map for %#" PRIx64 "\n", *ip);
addr_location__exit(&al);
ret = -EINVAL;
goto out_ret;
}
dso = map__dso(al.map);
if (dso->data.status == DSO_DATA_STATUS_ERROR &&
dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE)) {
ret = -ENOENT;
goto out_ret;
}
offset = map__map_ip(al.map, *ip);
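/* With no target IP, and still within the first map, a cached walk result can be reused */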
if (!to_ip && one_map) {
struct intel_pt_cache_entry *e;
e = intel_pt_cache_lookup(dso, machine, offset);
if (e &&
(!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
*insn_cnt_ptr = e->insn_cnt;
*ip += e->byte_cnt;
intel_pt_insn->op = e->op;
intel_pt_insn->branch = e->branch;
intel_pt_insn->emulated_ptwrite = e->emulated_ptwrite;
intel_pt_insn->length = e->length;
intel_pt_insn->rel = e->rel;
memcpy(intel_pt_insn->buf, e->insn, INTEL_PT_INSN_BUF_SZ);
intel_pt_log_insn_no_data(intel_pt_insn, *ip);
ret = 0;
goto out_ret;
}
}
start_offset = offset;
start_ip = *ip;
/* Load maps to ensure dso->is_64_bit has been updated */
map__load(al.map);
x86_64 = dso->is_64_bit;
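/* Decode instructions one at a time from the dso file data */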
while (1) {
len = dso__data_read_offset(dso, machine,
offset, buf,
INTEL_PT_INSN_BUF_SZ);
if (len <= 0) {
intel_pt_log("ERROR: failed to read at offset %#" PRIx64 " ",
offset);
if (intel_pt_enable_logging)
dso__fprintf(dso, intel_pt_log_fp());
ret = -EINVAL;
goto out_ret;
}
if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn)) {
ret = -EINVAL;
goto out_ret;
}
intel_pt_log_insn(intel_pt_insn, *ip);
insn_cnt += 1;
if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH) {
bool eptw;
u64 offs;
if (!intel_pt_jmp_16(intel_pt_insn))
goto out;
/* Check for emulated ptwrite */
offs = offset + intel_pt_insn->length;
eptw = intel_pt_emulated_ptwrite(dso, machine, offs);
intel_pt_insn->emulated_ptwrite = eptw;
goto out;
}
if (max_insn_cnt && insn_cnt >= max_insn_cnt)
goto out_no_cache;
*ip += intel_pt_insn->length;
if (to_ip && *ip == to_ip) {
intel_pt_insn->length = 0;
goto out_no_cache;
}
if (*ip >= map__end(al.map))
break;
offset += intel_pt_insn->length;
}
one_map = false;
}
out:
*insn_cnt_ptr = insn_cnt;
if (!one_map)
goto out_no_cache;
/*
* Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
* entries.
*/
if (to_ip) {
struct intel_pt_cache_entry *e;
e = intel_pt_cache_lookup(map__dso(al.map), machine, start_offset);
if (e)
goto out_ret;
}
/* Ignore cache errors */
intel_pt_cache_add(map__dso(al.map), machine, start_offset, insn_cnt,
*ip - start_ip, intel_pt_insn);
out_ret:
addr_location__exit(&al);
return ret;
out_no_cache:
*insn_cnt_ptr = insn_cnt;
addr_location__exit(&al);
return 0;
}
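/*
 * Check a TIP.PGD ip/offset against the configured address filters.  Return
 * true if tracing is expected to have stopped here: either a tracestop
 * region was hit, or filters are in use and the ip is outside all of them.
 */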
static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
uint64_t offset, const char *filename)
{
struct addr_filter *filt;
bool have_filter = false;
bool hit_tracestop = false;
bool hit_filter = false;
list_for_each_entry(filt, &pt->filts.head, list) {
if (filt->start)
have_filter = true;
if ((filename && !filt->filename) ||
(!filename && filt->filename) ||
(filename && strcmp(filename, filt->filename)))
continue;
if (!(offset >= filt->addr && offset < filt->addr + filt->size))
continue;
intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
ip, offset, filename ? filename : "[kernel]",
filt->start ? "filter" : "stop",
filt->addr, filt->size);
if (filt->start)
hit_filter = true;
else
hit_tracestop = true;
}
if (!hit_tracestop && !hit_filter)
intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
ip, offset, filename ? filename : "[kernel]");
return hit_tracestop || (have_filter && !hit_filter);
}
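/*
 * Resolve an ip that produced a TIP.PGD packet to a dso and file offset,
 * then match it against the address filters.  Kernel and guest kernel
 * addresses are matched directly; user space addresses are resolved via the
 * thread's maps.  Decoding guest user space is not supported.
 */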
static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
struct intel_pt_queue *ptq = data;
struct thread *thread;
struct addr_location al;
u8 cpumode;
u64 offset;
int res;
if (ptq->state->to_nr) {
if (intel_pt_guest_kernel_ip(ip))
return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
/* No support for decoding guest user space */
return -EINVAL;
} else if (ip >= ptq->pt->kernel_start) {
return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
}
cpumode = PERF_RECORD_MISC_USER;
thread = ptq->thread;
if (!thread)
return -EINVAL;
addr_location__init(&al);
if (!thread__find_map(thread, cpumode, ip, &al) || !map__dso(al.map))
return -EINVAL;
offset = map__map_ip(al.map, ip);
res = intel_pt_match_pgd_ip(ptq->pt, ip, offset, map__dso(al.map)->long_name);
addr_location__exit(&al);
return res;
}
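/* Decoder callback: treat errors (negative return) as no filter match */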
static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
return __intel_pt_pgd_ip(ip, data) > 0;
}
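/*
 * If the event belongs to the Intel PT PMU, return true and optionally copy
 * out its config bits.
 */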
static bool intel_pt_get_config(struct intel_pt *pt,
struct perf_event_attr *attr, u64 *config)
{
if (attr->type == pt->pmu_type) {
if (config)
*config = attr->config;
return true;
}
return false;
}
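/* True if no Intel PT event in the session traces the kernel */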
static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
struct evsel *evsel;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
!evsel->core.attr.exclude_kernel)
return false;
}
return true;
}
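/*
 * Return compression is considered enabled unless an Intel PT event was
 * configured with the noretcomp bit set.
 */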
static bool intel_pt_return_compression(struct intel_pt *pt)
{
struct evsel *evsel;
u64 config;
if (!pt->noretcomp_bit)
return true;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
(config & pt->noretcomp_bit))
return false;
}
return true;
}
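/*
 * Branch tracing is enabled unless an Intel PT event uses pass-through mode
 * with the branch enable (BRANCH_EN) bit cleared.
 */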
static bool intel_pt_branch_enable(struct intel_pt *pt)
{
struct evsel *evsel;
u64 config;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
(config & INTEL_PT_CFG_PASS_THRU) &&
!(config & INTEL_PT_CFG_BRANCH_EN))
return false;
}
return true;
}
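/* True if TNT (taken / not-taken) packets were disabled in the Intel PT config */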
static bool intel_pt_disabled_tnt(struct intel_pt *pt)
{
struct evsel *evsel;
u64 config;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
config & INTEL_PT_CFG_TNT_DIS)
return true;
}
return false;
}
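/*
 * Extract the MTC period field from the Intel PT config, using the PMU's
 * mtc_period bit mask (mtc_freq_bits) to locate and shift the field.
 */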
static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
struct evsel *evsel;
unsigned int shift;
u64 config;
if (!pt->mtc_freq_bits)
return 0;
for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
config >>= 1;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config))
return (config & pt->mtc_freq_bits) >> shift;
}
return 0;
}
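/*
 * Decode without timestamps if TSC is unavailable or not configured, if
 * samples do not carry PERF_SAMPLE_TIME, or if timeless decoding was
 * explicitly requested.
 */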
static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
struct evsel *evsel;
bool timeless_decoding = true;
u64 config;
if (!pt->tsc_bit || !pt->cap_user_time_zero || pt->synth_opts.timeless_decoding)
return true;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
return true;
if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
if (config & pt->tsc_bit)
timeless_decoding = false;
else
return true;
}
}
return timeless_decoding;
}
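/* True if any Intel PT event in the session traces the kernel */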
static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
struct evsel *evsel;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
!evsel->core.attr.exclude_kernel)
return true;
}
return false;
}
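/* True if the Intel PT events were configured with TSC packets enabled */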
static bool intel_pt_have_tsc(struct intel_pt *pt)
{
struct evsel *evsel;
bool have_tsc = false;
u64 config;
if (!pt->tsc_bit)
return false;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
if (config & pt->tsc_bit)
have_tsc = true;
else
return false;
}
}
return have_tsc;
}
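/* True if any Intel PT event has MTC packets enabled */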
static bool intel_pt_have_mtc(struct intel_pt *pt)
{
struct evsel *evsel;
u64 config;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
(config & pt->mtc_bit))
return true;
}
return false;
}
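/*
 * True if Intel PT data was collected as AUX area samples (PERF_SAMPLE_AUX
 * with a non-zero aux_sample_size) rather than as a full trace.
 */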
static bool intel_pt_sampling_mode(struct intel_pt *pt)
{
struct evsel *evsel;
evlist__for_each_entry(pt->session->evlist, evsel) {
if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
evsel->core.attr.aux_sample_size)
return true;
}
return false;
}
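/* Return the config bits of the first Intel PT event in the session */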
static u64 intel_pt_ctl(struct intel_pt *pt)
{
struct evsel *evsel;
u64 config;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->core.attr, &config))
return config;
}
return 0;
}
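/*
 * Convert nanoseconds to TSC ticks using the session's time conversion
 * parameters.  The quotient and remainder are shifted separately to avoid
 * 64-bit overflow.
 */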
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
u64 quot, rem;
quot = ns / pt->tc.time_mult;
rem = ns % pt->tc.time_mult;
return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
pt->tc.time_mult;
}
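/*
 * Allocate a callchain buffer large enough for the synthesized callchain
 * depth plus one context entry.
 */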
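/*
 * Allocate a buffer for a synthesized callchain: the ip_callchain header
 * plus callchain_sz entries and one extra slot for the callchain context.
 */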
static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
{
size_t sz = sizeof(struct ip_callchain);
/* Add 1 to callchain_sz for callchain context */
sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
return zalloc(sz);
}
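/*
 * Prepare to synthesize callchains for regular events (--itrace=G): flag
 * evsels that do not already sample PERF_SAMPLE_CALLCHAIN and allocate the
 * shared callchain buffer that gets attached to their samples.
 */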
static int intel_pt_callchain_init(struct intel_pt *pt)
{
struct evsel *evsel;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
}
pt->chain = intel_pt_alloc_chain(pt);
if (!pt->chain)
return -ENOMEM;
return 0;
}
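/*
 * Attach a callchain, reconstructed late from the thread stack, to a
 * regular event sample.
 */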
static void intel_pt_add_callchain(struct intel_pt *pt,
struct perf_sample *sample)
{
struct thread *thread = machine__findnew_thread(pt->machine,
sample->pid,
sample->tid);
thread_stack__sample_late(thread, sample->cpu, pt->chain,
pt->synth_opts.callchain_sz + 1, sample->ip,
pt->kernel_start);
sample->callchain = pt->chain;
}
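/* Allocate a branch stack buffer with room for entry_cnt branch entries */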
static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
{
size_t sz = sizeof(struct branch_stack);
sz += entry_cnt * sizeof(struct branch_entry);
return zalloc(sz);
}
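/*
 * Prepare to synthesize branch stacks for regular events (--itrace=L): flag
 * evsels that do not already sample PERF_SAMPLE_BRANCH_STACK and allocate
 * the shared branch stack buffer.
 */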
static int intel_pt_br_stack_init(struct intel_pt *pt)
{
struct evsel *evsel;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
}
pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
if (!pt->br_stack)
return -ENOMEM;
return 0;
}
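/*
 * Attach a branch stack, reconstructed late from the thread stack, to a
 * regular event sample.
 */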
static void intel_pt_add_br_stack(struct intel_pt *pt,
struct perf_sample *sample)
{
struct thread *thread = machine__findnew_thread(pt->machine,
sample->pid,
sample->tid);
thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
pt->br_stack_sz, sample->ip,
pt->kernel_start);
sample->branch_stack = pt->br_stack;
thread__put(thread);
}
/* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
#define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)
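/*
 * Allocate a decode queue: per-queue sample buffers plus a decoder
 * configured from the recorded session (TSC/CTC ratio, CPUID quirks) and
 * the --itrace synthesis options (period, callchains, branch stacks).
 */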
static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
unsigned int queue_nr)
{
struct intel_pt_params params = { .get_trace = 0, };
struct perf_env *env = pt->machine->env;
struct intel_pt_queue *ptq;
ptq = zalloc(sizeof(struct intel_pt_queue));
if (!ptq)
return NULL;
if (pt->synth_opts.callchain) {
ptq->chain = intel_pt_alloc_chain(pt);
if (!ptq->chain)
goto out_free;
}
if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);
ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
if (!ptq->last_branch)
goto out_free;
}
ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
if (!ptq->event_buf)
goto out_free;
ptq->pt = pt;
ptq->queue_nr = queue_nr;
ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
ptq->pid = -1;
ptq->tid = -1;
ptq->cpu = -1;
ptq->next_tid = -1;
params.get_trace = intel_pt_get_trace;
params.walk_insn = intel_pt_walk_next_insn;
params.lookahead = intel_pt_lookahead;
params.findnew_vmcs_info = intel_pt_findnew_vmcs_info;
params.data = ptq;
params.return_compression = intel_pt_return_compression(pt);
params.branch_enable = intel_pt_branch_enable(pt);
params.ctl = intel_pt_ctl(pt);
params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
params.mtc_period = intel_pt_mtc_period(pt);
params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
params.quick = pt->synth_opts.quick;
params.vm_time_correlation = pt->synth_opts.vm_time_correlation;
params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run;
params.first_timestamp = pt->first_timestamp;
params.max_loops = pt->max_loops;
/* Cannot walk code without TNT, so force 'quick' mode */
if (params.branch_enable && intel_pt_disabled_tnt(pt) && !params.quick)
params.quick = 1;
if (pt->filts.cnt > 0)
params.pgd_ip = intel_pt_pgd_ip;
if (pt->synth_opts.instructions || pt->synth_opts.cycles) {
if (pt->synth_opts.period) {
switch (pt->synth_opts.period_type) {
case PERF_ITRACE_PERIOD_INSTRUCTIONS:
params.period_type =
INTEL_PT_PERIOD_INSTRUCTIONS;
params.period = pt->synth_opts.period;
break;
case PERF_ITRACE_PERIOD_TICKS:
params.period_type = INTEL_PT_PERIOD_TICKS;
params.period = pt->synth_opts.period;
break;
case PERF_ITRACE_PERIOD_NANOSECS:
params.period_type = INTEL_PT_PERIOD_TICKS;
params.period = intel_pt_ns_to_ticks(pt,
pt->synth_opts.period);
break;
default:
break;
}
}
if (!params.period) {
params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
params.period = 1;
}
}
if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
params.flags |= INTEL_PT_FUP_WITH_NLIP;
ptq->decoder = intel_pt_decoder_new(&params);
if (!ptq->decoder)
goto out_free;
return ptq;
out_free:
zfree(&ptq->event_buf);
zfree(&ptq->last_branch);
zfree(&ptq->chain);
free(ptq);
return NULL;
}
static void intel_pt_free_queue(void *priv)
{
struct intel_pt_queue *ptq = priv;
if (!ptq)
return;
thread__zput(ptq->thread);
thread__zput(ptq->guest_thread);
thread__zput(ptq->unknown_guest_thread);
intel_pt_decoder_free(ptq->decoder);
zfree(&ptq->event_buf);
zfree(&ptq->last_branch);
zfree(&ptq->chain);
free(ptq);
}
static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp)
{
unsigned int i;
pt->first_timestamp = timestamp;
for (i = 0; i < pt->queues.nr_queues; i++) {
struct auxtrace_queue *queue = &pt->queues.queue_array[i];
struct intel_pt_queue *ptq = queue->priv;
if (ptq && ptq->decoder)
intel_pt_set_first_timestamp(ptq->decoder, timestamp);
}
}
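/*
 * For guest machines, ptq->pid carries the machine pid: look up the guest
 * machine, the vCPU run by the current host thread, and the guest thread
 * currently running on that vCPU.
 */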
static int intel_pt_get_guest_from_sideband(struct intel_pt_queue *ptq)
{
struct machines *machines = &ptq->pt->session->machines;
struct machine *machine;
pid_t machine_pid = ptq->pid;
pid_t tid;
int vcpu;
if (machine_pid <= 0)
return 0; /* Not a guest machine */
machine = machines__find(machines, machine_pid);
if (!machine)
return 0; /* Not a guest machine */
if (ptq->guest_machine != machine) {
ptq->guest_machine = NULL;
thread__zput(ptq->guest_thread);
thread__zput(ptq->unknown_guest_thread);
ptq->unknown_guest_thread = machine__find_thread(machine, 0, 0);
if (!ptq->unknown_guest_thread)
return -1;
ptq->guest_machine = machine;
}
vcpu = ptq->thread ? thread__guest_cpu(ptq->thread) : -1;
if (vcpu < 0)
return -1;
tid = machine__get_current_tid(machine, vcpu);
if (ptq->guest_thread && thread__tid(ptq->guest_thread) != tid)
thread__zput(ptq->guest_thread);
if (!ptq->guest_thread) {
ptq->guest_thread = machine__find_thread(machine, -1, tid);
if (!ptq->guest_thread)
return -1;
}
ptq->guest_machine_pid = machine_pid;
ptq->guest_pid = thread__pid(ptq->guest_thread);
ptq->guest_tid = tid;
ptq->vcpu = vcpu;
return 0;
}
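/* Refresh the queue's pid/tid/cpu and (guest) thread references */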
static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
struct auxtrace_queue *queue)
{
struct intel_pt_queue *ptq = queue->priv;
if (queue->tid == -1 || pt->have_sched_switch) {
ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
if (ptq->tid == -1)
ptq->pid = -1;
thread__zput(ptq->thread);
}
if (!ptq->thread && ptq->tid != -1)
ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
if (ptq->thread) {
ptq->pid = thread__pid(ptq->thread);
if (queue->cpu == -1)
ptq->cpu = thread__cpu(ptq->thread);
}
if (pt->have_guest_sideband && intel_pt_get_guest_from_sideband(ptq)) {
ptq->guest_machine_pid = 0;
ptq->guest_pid = -1;
ptq->guest_tid = -1;
ptq->vcpu = -1;
}
}
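/*
 * Derive the perf sample flags for the current decoder state: branch type,
 * TX abort/in-TX, async branches (interrupt vs VM-exit), trace begin/end,
 * and interrupt-flag changes when event tracing is supported.
 */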
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
ptq->insn_len = 0;
if (ptq->state->flags & INTEL_PT_ABORT_TX) {
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
} else if (ptq->state->flags & INTEL_PT_ASYNC) {
if (!ptq->state->to_ip)
ptq->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_TRACE_END;
else if (ptq->state->from_nr && !ptq->state->to_nr)
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_VMEXIT;
else
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_INTERRUPT;
} else {
if (ptq->state->from_ip)
ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
else
ptq->flags = PERF_IP_FLAG_BRANCH |
PERF_IP_FLAG_TRACE_BEGIN;
if (ptq->state->flags & INTEL_PT_IN_TX)
ptq->flags |= PERF_IP_FLAG_IN_TX;
ptq->insn_len = ptq->state->insn_len;
memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
}
if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
if (ptq->state->type & INTEL_PT_TRACE_END)
ptq->flags |= PERF_IP_FLAG_TRACE_END;
if (pt->cap_event_trace) {
if (ptq->state->type & INTEL_PT_IFLAG_CHG) {
if (!ptq->state->from_iflag)
ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
if (ptq->state->from_iflag != ptq->state->to_iflag)
ptq->flags |= PERF_IP_FLAG_INTR_TOGGLE;
} else if (!ptq->state->to_iflag) {
ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
}
}
}
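/* Initialize the queue's time-range selection from the first configured range, if any */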
static void intel_pt_setup_time_range(struct intel_pt *pt,
struct intel_pt_queue *ptq)
{
if (!pt->range_cnt)
return;
ptq->sel_timestamp = pt->time_ranges[0].start;
ptq->sel_idx = 0;
if (ptq->sel_timestamp) {
ptq->sel_start = true;
} else {
ptq->sel_timestamp = pt->time_ranges[0].end;
ptq->sel_start = false;
}
}
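/*
 * Lazily create the queue's decode state and, unless decoding is timeless,
 * decode far enough to find the first timestamp so the queue can be added
 * to the heap that orders queues by time.
 */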
static int intel_pt_setup_queue(struct intel_pt *pt,
struct auxtrace_queue *queue,
unsigned int queue_nr)
{
struct intel_pt_queue *ptq = queue->priv;
if (list_empty(&queue->head))
return 0;
if (!ptq) {
ptq = intel_pt_alloc_queue(pt, queue_nr);
if (!ptq)
return -ENOMEM;
queue->priv = ptq;
if (queue->cpu != -1)
ptq->cpu = queue->cpu;
ptq->tid = queue->tid;
ptq->cbr_seen = UINT_MAX;
if (pt->sampling_mode && !pt->snapshot_mode &&
pt->timeless_decoding)
ptq->step_through_buffers = true;
ptq->sync_switch = pt->sync_switch;
intel_pt_setup_time_range(pt, ptq);
}
if (!ptq->on_heap &&
(!ptq->sync_switch ||
ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
const struct intel_pt_state *state;
int ret;
if (pt->timeless_decoding)
return 0;
intel_pt_log("queue %u getting timestamp\n", queue_nr);
intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
queue_nr, ptq->cpu, ptq->pid, ptq->tid);
if (ptq->sel_start && ptq->sel_timestamp) {
ret = intel_pt_fast_forward(ptq->decoder,
ptq->sel_timestamp);
if (ret)
return ret;
}
while (1) {
state = intel_pt_decode(ptq->decoder);
if (state->err) {
if (state->err == INTEL_PT_ERR_NODATA) {
intel_pt_log("queue %u has no timestamp\n",
queue_nr);
return 0;
}
continue;
}
if (state->timestamp)
break;
}
ptq->timestamp = state->timestamp;
intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
queue_nr, ptq->timestamp);
ptq->state = state;
ptq->have_sample = true;
if (ptq->sel_start && ptq->sel_timestamp &&
ptq->timestamp < ptq->sel_timestamp)
ptq->have_sample = false;
intel_pt_sample_flags(ptq);
ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
if (ret)
return ret;
ptq->on_heap = true;
}
return 0;
}
static int intel_pt_setup_queues(struct intel_pt *pt)
{
unsigned int i;
int ret;
for (i = 0; i < pt->queues.nr_queues; i++) {
ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
if (ret)
return ret;
}
return 0;
}
static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
return pt->synth_opts.initial_skip &&
pt->num_events++ < pt->synth_opts.initial_skip;
}
/*
* Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
* Also ensure CBR is first non-skipped event by allowing for 4 more samples
* from this decoder state.
*/
static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
{
return pt->synth_opts.initial_skip &&
pt->num_events + 4 < pt->synth_opts.initial_skip;
}
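/*
 * Fill in the fields common to all synthesized samples: pid/tid (switched
 * to guest values when the sample comes from a guest), cpu and instruction
 * bytes.
 */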
static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.size = sizeof(struct perf_event_header);
sample->pid = ptq->pid;
sample->tid = ptq->tid;
if (ptq->pt->have_guest_sideband) {
if ((ptq->state->from_ip && ptq->state->from_nr) ||
(ptq->state->to_ip && ptq->state->to_nr)) {
sample->pid = ptq->guest_pid;
sample->tid = ptq->guest_tid;
sample->machine_pid = ptq->guest_machine_pid;
sample->vcpu = ptq->vcpu;
}
}
sample->cpu = ptq->cpu;
sample->insn_len = ptq->insn_len;
memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
}
static void intel_pt_prep_b_sample(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
intel_pt_prep_a_sample(ptq, event, sample);
if (!pt->timeless_decoding)
sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
sample->ip = ptq->state->from_ip;
sample->addr = ptq->state->to_ip;
sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
sample->period = 1;
sample->flags = ptq->flags;
event->sample.header.misc = sample->cpumode;
}
static int intel_pt_inject_event(union perf_event *event,
struct perf_sample *sample, u64 type)
{
event->header.size = perf_event__sample_event_size(sample, type, 0);
return perf_event__synthesize_sample(event, type, 0, sample);
}
static inline int intel_pt_opt_inject(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample, u64 type)
{
if (!pt->synth_opts.inject)
return 0;
return intel_pt_inject_event(event, sample, type);
}
static int intel_pt_deliver_synth_event(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample, u64 type)
{
int ret;
ret = intel_pt_opt_inject(pt, event, sample, type);
if (ret)
return ret;
ret = perf_session__deliver_synth_event(pt->session, event, sample);
if (ret)
pr_err("Intel PT: failed to deliver event, error %d\n", ret);
return ret;
}
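/*
 * Synthesize a branch sample for the current decoder state, adding a dummy
 * branch stack if 'perf report' is sorting by branch (see comment below).
 */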
static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct dummy_branch_stack {
u64 nr;
u64 hw_idx;
struct branch_entry entries;
} dummy_bs;
if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
return 0;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_b_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->branches_id;
sample.stream_id = ptq->pt->branches_id;
/*
* perf report cannot handle events without a branch stack when using
* SORT_MODE__BRANCH so make a dummy one.
*/
if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
dummy_bs = (struct dummy_branch_stack){
.nr = 1,
.hw_idx = -1ULL,
.entries = {
.from = sample.ip,
.to = sample.addr,
},
};
sample.branch_stack = (struct branch_stack *)&dummy_bs;
}
if (ptq->sample_ipc)
sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
if (sample.cyc_cnt) {
sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
}
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->branches_sample_type);
}
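/*
 * On top of the branch fields, optionally attach a callchain and a branch
 * stack sampled from the per-thread stacks maintained during decoding.
 */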
static void intel_pt_prep_sample(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
intel_pt_prep_b_sample(pt, ptq, event, sample);
if (pt->synth_opts.callchain) {
thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
pt->synth_opts.callchain_sz + 1,
sample->ip, pt->kernel_start);
sample->callchain = ptq->chain;
}
if (pt->synth_opts.last_branch) {
thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
pt->br_stack_sz);
sample->branch_stack = ptq->last_branch;
}
}
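/*
 * Instruction samples: the period is the number of instructions retired since
 * the previous instruction sample, or 1 in "quick" mode.
 */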
static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->instructions_id;
sample.stream_id = ptq->pt->instructions_id;
if (pt->synth_opts.quick)
sample.period = 1;
else
sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
if (ptq->sample_ipc)
sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
if (sample.cyc_cnt) {
sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
}
ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->instructions_sample_type);
}
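/*
 * Cycle samples: the period is the number of cycles since the last cycle
 * sample, so nothing is synthesized unless IPC information is available.
 */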
static int intel_pt_synth_cycle_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
u64 period = 0;
if (ptq->sample_ipc)
period = ptq->ipc_cyc_cnt - ptq->last_cy_cyc_cnt;
if (!period || intel_pt_skip_event(pt))
return 0;
intel_pt_prep_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->cycles_id;
sample.stream_id = ptq->pt->cycles_id;
sample.period = period;
sample.cyc_cnt = period;
sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_cy_insn_cnt;
ptq->last_cy_insn_cnt = ptq->ipc_insn_cnt;
ptq->last_cy_cyc_cnt = ptq->ipc_cyc_cnt;
return intel_pt_deliver_synth_event(pt, event, &sample, pt->cycles_sample_type);
}
static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->transactions_id;
sample.stream_id = ptq->pt->transactions_id;
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->transactions_sample_type);
}
static void intel_pt_prep_p_sample(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
intel_pt_prep_sample(pt, ptq, event, sample);
/*
* Zero IP is used to mean "trace start" but that is not the case for
* power or PTWRITE events with no IP, so clear the flags.
*/
if (!sample->ip)
sample->flags = 0;
}
static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_ptwrite raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->ptwrites_id;
sample.stream_id = ptq->pt->ptwrites_id;
raw.flags = 0;
raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
raw.payload = cpu_to_le64(ptq->state->ptw_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->ptwrites_sample_type);
}
static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_cbr raw;
u32 flags;
if (intel_pt_skip_cbr_event(pt))
return 0;
ptq->cbr_seen = ptq->state->cbr;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->cbr_id;
sample.stream_id = ptq->pt->cbr_id;
flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
raw.flags = cpu_to_le32(flags);
raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
raw.reserved3 = 0;
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_psb raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->psb_id;
sample.stream_id = ptq->pt->psb_id;
sample.flags = 0;
raw.reserved = 0;
raw.offset = ptq->state->psb_offset;
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_mwait raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->mwait_id;
sample.stream_id = ptq->pt->mwait_id;
raw.reserved = 0;
raw.payload = cpu_to_le64(ptq->state->mwait_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_pwre raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->pwre_id;
sample.stream_id = ptq->pt->pwre_id;
raw.reserved = 0;
raw.payload = cpu_to_le64(ptq->state->pwre_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_exstop raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->exstop_id;
sample.stream_id = ptq->pt->exstop_id;
raw.flags = 0;
raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_pwrx raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->pwrx_id;
sample.stream_id = ptq->pt->pwrx_id;
raw.reserved = 0;
raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->pwr_events_sample_type);
}
/*
* PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer
* intel_pt_add_gp_regs().
*/
static const int pebs_gp_regs[] = {
[PERF_REG_X86_FLAGS] = 1,
[PERF_REG_X86_IP] = 2,
[PERF_REG_X86_AX] = 3,
[PERF_REG_X86_CX] = 4,
[PERF_REG_X86_DX] = 5,
[PERF_REG_X86_BX] = 6,
[PERF_REG_X86_SP] = 7,
[PERF_REG_X86_BP] = 8,
[PERF_REG_X86_SI] = 9,
[PERF_REG_X86_DI] = 10,
[PERF_REG_X86_R8] = 11,
[PERF_REG_X86_R9] = 12,
[PERF_REG_X86_R10] = 13,
[PERF_REG_X86_R11] = 14,
[PERF_REG_X86_R12] = 15,
[PERF_REG_X86_R13] = 16,
[PERF_REG_X86_R14] = 17,
[PERF_REG_X86_R15] = 18,
};
static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
const struct intel_pt_blk_items *items,
u64 regs_mask)
{
const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
u32 bit;
int i;
for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
/* Get the PEBS gp_regs array index */
int n = pebs_gp_regs[i] - 1;
if (n < 0)
continue;
/*
* Add only registers that were requested (i.e. 'regs_mask') and
* that were provided (i.e. 'mask'), and update the resulting
* mask (i.e. 'intr_regs->mask') accordingly.
*/
if (mask & 1 << n && regs_mask & bit) {
intr_regs->mask |= bit;
*pos++ = gp_regs[n];
}
}
return pos;
}
#ifndef PERF_REG_X86_XMM0
#define PERF_REG_X86_XMM0 32
#endif
static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
const struct intel_pt_blk_items *items,
u64 regs_mask)
{
u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
const u64 *xmm = items->xmm;
/*
* If there are any XMM registers, then there should be all of them.
* Nevertheless, follow the logic to add only registers that were
* requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
* and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
*/
intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;
for (; mask; mask >>= 1, xmm++) {
if (mask & 1)
*pos++ = *xmm;
}
}
#define LBR_INFO_MISPRED (1ULL << 63)
#define LBR_INFO_IN_TX (1ULL << 62)
#define LBR_INFO_ABORT (1ULL << 61)
#define LBR_INFO_CYCLES 0xffff
/* Refer kernel's intel_pmu_store_pebs_lbrs() */
static u64 intel_pt_lbr_flags(u64 info)
{
union {
struct branch_flags flags;
u64 result;
} u;
u.result = 0;
u.flags.mispred = !!(info & LBR_INFO_MISPRED);
u.flags.predicted = !(info & LBR_INFO_MISPRED);
u.flags.in_tx = !!(info & LBR_INFO_IN_TX);
u.flags.abort = !!(info & LBR_INFO_ABORT);
u.flags.cycles = info & LBR_INFO_CYCLES;
return u.result;
}
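/*
 * LBR block items are stored as (from, to, info) triplets; only complete
 * triplets are converted into branch stack entries.
 */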
static void intel_pt_add_lbrs(struct branch_stack *br_stack,
const struct intel_pt_blk_items *items)
{
u64 *to;
int i;
br_stack->nr = 0;
to = &br_stack->entries[0].from;
for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
u32 mask = items->mask[i];
const u64 *from = items->val[i];
for (; mask; mask >>= 3, from += 3) {
if ((mask & 7) == 7) {
*to++ = from[0];
*to++ = from[1];
*to++ = intel_pt_lbr_flags(from[2]);
br_stack->nr += 1;
}
}
}
}
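/*
 * Synthesize one PEBS sample for the given evsel, filling in only the fields
 * that its sample_type requests from the PEBS block items.
 */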
static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id)
{
const struct intel_pt_blk_items *items = &ptq->state->items;
struct perf_sample sample = { .ip = 0, };
union perf_event *event = ptq->event_buf;
struct intel_pt *pt = ptq->pt;
u64 sample_type = evsel->core.attr.sample_type;
u8 cpumode;
u64 regs[8 * sizeof(sample.intr_regs.mask)];
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_a_sample(ptq, event, &sample);
sample.id = id;
sample.stream_id = id;
if (!evsel->core.attr.freq)
sample.period = evsel->core.attr.sample_period;
/* No support for non-zero CS base */
if (items->has_ip)
sample.ip = items->ip;
else if (items->has_rip)
sample.ip = items->rip;
else
sample.ip = ptq->state->from_ip;
cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
sample.cpumode = cpumode;
if (sample_type & PERF_SAMPLE_TIME) {
u64 timestamp = 0;
if (items->has_timestamp)
timestamp = items->timestamp;
else if (!pt->timeless_decoding)
timestamp = ptq->timestamp;
if (timestamp)
sample.time = tsc_to_perf_time(timestamp, &pt->tc);
}
if (sample_type & PERF_SAMPLE_CALLCHAIN &&
pt->synth_opts.callchain) {
thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
pt->synth_opts.callchain_sz, sample.ip,
pt->kernel_start);
sample.callchain = ptq->chain;
}
if (sample_type & PERF_SAMPLE_REGS_INTR &&
(items->mask[INTEL_PT_GP_REGS_POS] ||
items->mask[INTEL_PT_XMM_POS])) {
u64 regs_mask = evsel->core.attr.sample_regs_intr;
u64 *pos;
sample.intr_regs.abi = items->is_32_bit ?
PERF_SAMPLE_REGS_ABI_32 :
PERF_SAMPLE_REGS_ABI_64;
sample.intr_regs.regs = regs;
pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
if (items->mask[INTEL_PT_LBR_0_POS] ||
items->mask[INTEL_PT_LBR_1_POS] ||
items->mask[INTEL_PT_LBR_2_POS]) {
intel_pt_add_lbrs(ptq->last_branch, items);
} else if (pt->synth_opts.last_branch) {
thread_stack__br_sample(ptq->thread, ptq->cpu,
ptq->last_branch,
pt->br_stack_sz);
} else {
ptq->last_branch->nr = 0;
}
sample.branch_stack = ptq->last_branch;
}
if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
sample.addr = items->mem_access_address;
if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
/*
* Refer kernel's setup_pebs_adaptive_sample_data() and
* intel_hsw_weight().
*/
if (items->has_mem_access_latency) {
u64 weight = items->mem_access_latency >> 32;
/*
			 * Starting from SPR, the mem access latency field
			 * contains both cache latency [47:32] and instruction
			 * latency [15:0]. The cache latency is the same as the
			 * mem access latency on previous platforms.
			 *
			 * In practice, no memory access could last longer than
			 * 4G cycles, so latency >> 32 distinguishes between the
			 * two formats of the mem access latency field.
*/
if (weight > 0) {
sample.weight = weight & 0xffff;
sample.ins_lat = items->mem_access_latency & 0xffff;
} else
sample.weight = items->mem_access_latency;
}
if (!sample.weight && items->has_tsx_aux_info) {
/* Cycles last block */
sample.weight = (u32)items->tsx_aux_info;
}
}
if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
u64 ax = items->has_rax ? items->rax : 0;
/* Refer kernel's intel_hsw_transaction() */
u64 txn = (u8)(items->tsx_aux_info >> 32);
/* For RTM XABORTs also log the abort code from AX */
if (txn & PERF_TXN_TRANSACTION && ax & 1)
txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
sample.transaction = txn;
}
return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
}
static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
struct evsel *evsel = pt->pebs_evsel;
u64 id = evsel->core.id[0];
return intel_pt_do_synth_pebs_sample(ptq, evsel, id);
}
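/*
 * PEBS-via-PT: if the record identifies the applicable hardware counters,
 * synthesize a sample for each matching evsel, otherwise fall back to the
 * single configured PEBS event.
 */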
static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
{
const struct intel_pt_blk_items *items = &ptq->state->items;
struct intel_pt_pebs_event *pe;
struct intel_pt *pt = ptq->pt;
int err = -EINVAL;
int hw_id;
if (!items->has_applicable_counters || !items->applicable_counters) {
if (!pt->single_pebs)
pr_err("PEBS-via-PT record with no applicable_counters\n");
return intel_pt_synth_single_pebs_sample(ptq);
}
for_each_set_bit(hw_id, (unsigned long *)&items->applicable_counters, INTEL_PT_MAX_PEBS) {
pe = &ptq->pebs[hw_id];
if (!pe->evsel) {
if (!pt->single_pebs)
pr_err("PEBS-via-PT record with no matching event, hw_id %d\n",
hw_id);
return intel_pt_synth_single_pebs_sample(ptq);
}
err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id);
if (err)
return err;
}
return err;
}
static int intel_pt_synth_events_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct {
struct perf_synth_intel_evt cfe;
struct perf_synth_intel_evd evd[INTEL_PT_MAX_EVDS];
} raw;
int i;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->evt_id;
sample.stream_id = ptq->pt->evt_id;
raw.cfe.type = ptq->state->cfe_type;
raw.cfe.reserved = 0;
raw.cfe.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
raw.cfe.vector = ptq->state->cfe_vector;
raw.cfe.evd_cnt = ptq->state->evd_cnt;
for (i = 0; i < ptq->state->evd_cnt; i++) {
raw.evd[i].et = 0;
raw.evd[i].evd_type = ptq->state->evd[i].type;
raw.evd[i].payload = ptq->state->evd[i].payload;
}
sample.raw_size = perf_synth__raw_size(raw) +
ptq->state->evd_cnt * sizeof(struct perf_synth_intel_evd);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->evt_sample_type);
}
static int intel_pt_synth_iflag_chg_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_iflag_chg raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->iflag_chg_id;
sample.stream_id = ptq->pt->iflag_chg_id;
raw.flags = 0;
raw.iflag = ptq->state->to_iflag;
if (ptq->state->type & INTEL_PT_BRANCH) {
raw.via_branch = 1;
raw.branch_ip = ptq->state->to_ip;
} else {
sample.addr = 0;
}
sample.flags = ptq->flags;
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, event, &sample,
pt->iflag_chg_sample_type);
}
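/*
 * Synthesize an auxtrace error event, honouring the itrace error suppression
 * flags and, if requested, dumping the decoder debug log on error.
 */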
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
pid_t pid, pid_t tid, u64 ip, u64 timestamp,
pid_t machine_pid, int vcpu)
{
bool dump_log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR;
bool log_on_stdout = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT;
union perf_event event;
char msg[MAX_AUXTRACE_ERROR_MSG];
int err;
if (pt->synth_opts.error_minus_flags) {
if (code == INTEL_PT_ERR_OVR &&
pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW)
return 0;
if (code == INTEL_PT_ERR_LOST &&
pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST)
return 0;
}
intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
auxtrace_synth_guest_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
code, cpu, pid, tid, ip, msg, timestamp,
machine_pid, vcpu);
if (intel_pt_enable_logging && !log_on_stdout) {
FILE *fp = intel_pt_log_fp();
if (fp)
perf_event__fprintf_auxtrace_error(&event, fp);
}
if (code != INTEL_PT_ERR_LOST && dump_log_on_error)
intel_pt_log_dump_buf();
err = perf_session__deliver_synth_event(pt->session, &event, NULL);
if (err)
pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
err);
return err;
}
static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
const struct intel_pt_state *state)
{
struct intel_pt *pt = ptq->pt;
u64 tm = ptq->timestamp;
pid_t machine_pid = 0;
pid_t pid = ptq->pid;
pid_t tid = ptq->tid;
int vcpu = -1;
tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
if (pt->have_guest_sideband && state->from_nr) {
machine_pid = ptq->guest_machine_pid;
vcpu = ptq->vcpu;
pid = ptq->guest_pid;
tid = ptq->guest_tid;
}
return intel_pt_synth_error(pt, state->err, ptq->cpu, pid, tid,
state->from_ip, tm, machine_pid, vcpu);
}
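/*
 * Apply a deferred tid switch: update the machine's current tid for this cpu
 * and re-resolve the queue's pid/tid.
 */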
static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
struct auxtrace_queue *queue;
pid_t tid = ptq->next_tid;
int err;
if (tid == -1)
return 0;
intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
queue = &pt->queues.queue_array[ptq->queue_nr];
intel_pt_set_pid_tid_cpu(pt, queue);
ptq->next_tid = -1;
return err;
}
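/*
 * Only a plain synchronous branch to the kernel's switch IP is treated as the
 * point at which a context switch occurs.
 */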
static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
struct intel_pt *pt = ptq->pt;
return ip == pt->switch_ip &&
(ptq->flags & PERF_IP_FLAG_BRANCH) &&
!(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}
#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
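/*
 * Main per-decoder-state hook: synthesize whichever sample types are both
 * enabled and indicated by the current decoder state.
 */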
static int intel_pt_sample(struct intel_pt_queue *ptq)
{
const struct intel_pt_state *state = ptq->state;
struct intel_pt *pt = ptq->pt;
int err;
if (!ptq->have_sample)
return 0;
ptq->have_sample = false;
if (pt->synth_opts.approx_ipc) {
ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
ptq->ipc_cyc_cnt = ptq->state->cycles;
ptq->sample_ipc = true;
} else {
ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC;
}
/* Ensure guest code maps are set up */
if (symbol_conf.guest_code && (state->from_nr || state->to_nr))
intel_pt_get_guest(ptq);
/*
* Do PEBS first to allow for the possibility that the PEBS timestamp
* precedes the current timestamp.
*/
if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
err = intel_pt_synth_pebs_sample(ptq);
if (err)
return err;
}
if (pt->synth_opts.intr_events) {
if (state->type & INTEL_PT_EVT) {
err = intel_pt_synth_events_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_IFLAG_CHG) {
err = intel_pt_synth_iflag_chg_sample(ptq);
if (err)
return err;
}
}
if (pt->sample_pwr_events) {
if (state->type & INTEL_PT_PSB_EVT) {
err = intel_pt_synth_psb_sample(ptq);
if (err)
return err;
}
if (ptq->state->cbr != ptq->cbr_seen) {
err = intel_pt_synth_cbr_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_PWR_EVT) {
if (state->type & INTEL_PT_MWAIT_OP) {
err = intel_pt_synth_mwait_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_PWR_ENTRY) {
err = intel_pt_synth_pwre_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_EX_STOP) {
err = intel_pt_synth_exstop_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_PWR_EXIT) {
err = intel_pt_synth_pwrx_sample(ptq);
if (err)
return err;
}
}
}
if (state->type & INTEL_PT_INSTRUCTION) {
if (pt->sample_instructions) {
err = intel_pt_synth_instruction_sample(ptq);
if (err)
return err;
}
if (pt->sample_cycles) {
err = intel_pt_synth_cycle_sample(ptq);
if (err)
return err;
}
}
if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
err = intel_pt_synth_transaction_sample(ptq);
if (err)
return err;
}
if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
err = intel_pt_synth_ptwrite_sample(ptq);
if (err)
return err;
}
if (!(state->type & INTEL_PT_BRANCH))
return 0;
if (pt->use_thread_stack) {
thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
state->from_ip, state->to_ip, ptq->insn_len,
state->trace_nr, pt->callstack,
pt->br_stack_sz_plus,
pt->mispred_all);
} else {
thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
}
if (pt->sample_branches) {
if (state->from_nr != state->to_nr &&
state->from_ip && state->to_ip) {
struct intel_pt_state *st = (struct intel_pt_state *)state;
u64 to_ip = st->to_ip;
u64 from_ip = st->from_ip;
/*
* perf cannot handle having different machines for ip
* and addr, so create 2 branches.
*/
st->to_ip = 0;
err = intel_pt_synth_branch_sample(ptq);
if (err)
return err;
st->from_ip = 0;
st->to_ip = to_ip;
err = intel_pt_synth_branch_sample(ptq);
st->from_ip = from_ip;
} else {
err = intel_pt_synth_branch_sample(ptq);
}
if (err)
return err;
}
if (!ptq->sync_switch)
return 0;
if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
switch (ptq->switch_state) {
case INTEL_PT_SS_NOT_TRACING:
case INTEL_PT_SS_UNKNOWN:
case INTEL_PT_SS_EXPECTING_SWITCH_IP:
err = intel_pt_next_tid(pt, ptq);
if (err)
return err;
ptq->switch_state = INTEL_PT_SS_TRACING;
break;
default:
ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
return 1;
}
} else if (!state->to_ip) {
ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
ptq->switch_state = INTEL_PT_SS_UNKNOWN;
} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
state->to_ip == pt->ptss_ip &&
(ptq->flags & PERF_IP_FLAG_CALL)) {
ptq->switch_state = INTEL_PT_SS_TRACING;
}
return 0;
}
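/*
 * Walk the kernel map's symbols to find the address of the global
 * __switch_to symbol, which is used to recognize context switches in the
 * trace, and optionally return via @ptss_ip the address of either the
 * sched_switch tracepoint function or __perf_event_task_sched_out,
 * depending on how sched_switch information is being collected.
 */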
static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
struct machine *machine = pt->machine;
struct map *map;
struct symbol *sym, *start;
u64 ip, switch_ip = 0;
const char *ptss;
if (ptss_ip)
*ptss_ip = 0;
map = machine__kernel_map(machine);
if (!map)
return 0;
if (map__load(map))
return 0;
start = dso__first_symbol(map__dso(map));
for (sym = start; sym; sym = dso__next_symbol(sym)) {
if (sym->binding == STB_GLOBAL &&
!strcmp(sym->name, "__switch_to")) {
ip = map__unmap_ip(map, sym->start);
if (ip >= map__start(map) && ip < map__end(map)) {
switch_ip = ip;
break;
}
}
}
if (!switch_ip || !ptss_ip)
return 0;
if (pt->have_sched_switch == 1)
ptss = "perf_trace_sched_switch";
else
ptss = "__perf_event_task_sched_out";
for (sym = start; sym; sym = dso__next_symbol(sym)) {
if (!strcmp(sym->name, ptss)) {
ip = map__unmap_ip(map, sym->start);
if (ip >= map__start(map) && ip < map__end(map)) {
*ptss_ip = ip;
break;
}
}
}
return switch_ip;
}
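/* Turn on "sync_switch" for all queues, unless it is known to be unsupported */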
static void intel_pt_enable_sync_switch(struct intel_pt *pt)
{
unsigned int i;
if (pt->sync_switch_not_supported)
return;
pt->sync_switch = true;
for (i = 0; i < pt->queues.nr_queues; i++) {
struct auxtrace_queue *queue = &pt->queues.queue_array[i];
struct intel_pt_queue *ptq = queue->priv;
if (ptq)
ptq->sync_switch = true;
}
}
static void intel_pt_disable_sync_switch(struct intel_pt *pt)
{
unsigned int i;
pt->sync_switch = false;
for (i = 0; i < pt->queues.nr_queues; i++) {
struct auxtrace_queue *queue = &pt->queues.queue_array[i];
struct intel_pt_queue *ptq = queue->priv;
if (ptq) {
ptq->sync_switch = false;
intel_pt_next_tid(pt, ptq);
}
}
}
/*
* To filter against time ranges, it is only necessary to look at the next start
* or end time.
*/
static bool intel_pt_next_time(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
if (ptq->sel_start) {
/* Next time is an end time */
ptq->sel_start = false;
ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
return true;
} else if (ptq->sel_idx + 1 < pt->range_cnt) {
/* Next time is a start time */
ptq->sel_start = true;
ptq->sel_idx += 1;
ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
return true;
}
/* No next time */
return false;
}
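/*
 * Apply the selected time ranges: fast forward the decoder to the next
 * selected start time, or stop decoding once the last range has ended.
 * Returns 1 to stop decoding, 0 to continue, or a negative error code.
 */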
static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
{
int err;
while (1) {
if (ptq->sel_start) {
if (ptq->timestamp >= ptq->sel_timestamp) {
/* After start time, so consider next time */
intel_pt_next_time(ptq);
if (!ptq->sel_timestamp) {
/* No end time */
return 0;
}
/* Check against end time */
continue;
}
/* Before start time, so fast forward */
ptq->have_sample = false;
if (ptq->sel_timestamp > *ff_timestamp) {
if (ptq->sync_switch) {
intel_pt_next_tid(ptq->pt, ptq);
ptq->switch_state = INTEL_PT_SS_UNKNOWN;
}
*ff_timestamp = ptq->sel_timestamp;
err = intel_pt_fast_forward(ptq->decoder,
ptq->sel_timestamp);
if (err)
return err;
}
return 0;
} else if (ptq->timestamp > ptq->sel_timestamp) {
/* After end time, so consider next time */
if (!intel_pt_next_time(ptq)) {
/* No next time range, so stop decoding */
ptq->have_sample = false;
ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
return 1;
}
/* Check against next start time */
continue;
} else {
/* Before end time */
return 0;
}
}
}
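/*
 * Main decode loop for one queue: synthesize samples for the current decoder
 * state, decode the next state, handle decoder errors, update the queue
 * timestamp, and return once the queue timestamp reaches *timestamp (except
 * when decoding timelessly).
 */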
static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
const struct intel_pt_state *state = ptq->state;
struct intel_pt *pt = ptq->pt;
u64 ff_timestamp = 0;
int err;
if (!pt->kernel_start) {
pt->kernel_start = machine__kernel_start(pt->machine);
if (pt->per_cpu_mmaps &&
(pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
!pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
!pt->sampling_mode && !pt->synth_opts.vm_time_correlation) {
pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
if (pt->switch_ip) {
intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
pt->switch_ip, pt->ptss_ip);
intel_pt_enable_sync_switch(pt);
}
}
}
intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
while (1) {
err = intel_pt_sample(ptq);
if (err)
return err;
state = intel_pt_decode(ptq->decoder);
if (state->err) {
if (state->err == INTEL_PT_ERR_NODATA)
return 1;
if (ptq->sync_switch &&
state->from_ip >= pt->kernel_start) {
ptq->sync_switch = false;
intel_pt_next_tid(pt, ptq);
}
ptq->timestamp = state->est_timestamp;
if (pt->synth_opts.errors) {
err = intel_ptq_synth_error(ptq, state);
if (err)
return err;
}
continue;
}
ptq->state = state;
ptq->have_sample = true;
intel_pt_sample_flags(ptq);
/* Use estimated TSC upon return to user space */
if (pt->est_tsc &&
(state->from_ip >= pt->kernel_start || !state->from_ip) &&
state->to_ip && state->to_ip < pt->kernel_start) {
intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
state->timestamp, state->est_timestamp);
ptq->timestamp = state->est_timestamp;
/* Use estimated TSC in unknown switch state */
} else if (ptq->sync_switch &&
ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
intel_pt_is_switch_ip(ptq, state->to_ip) &&
ptq->next_tid == -1) {
intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
state->timestamp, state->est_timestamp);
ptq->timestamp = state->est_timestamp;
} else if (state->timestamp > ptq->timestamp) {
ptq->timestamp = state->timestamp;
}
if (ptq->sel_timestamp) {
err = intel_pt_time_filter(ptq, &ff_timestamp);
if (err)
return err;
}
if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
*timestamp = ptq->timestamp;
return 0;
}
}
return 0;
}
static inline int intel_pt_update_queues(struct intel_pt *pt)
{
if (pt->queues.new_data) {
pt->queues.new_data = false;
return intel_pt_setup_queues(pt);
}
return 0;
}
static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
unsigned int queue_nr;
u64 ts;
int ret;
while (1) {
struct auxtrace_queue *queue;
struct intel_pt_queue *ptq;
if (!pt->heap.heap_cnt)
return 0;
if (pt->heap.heap_array[0].ordinal >= timestamp)
return 0;
queue_nr = pt->heap.heap_array[0].queue_nr;
queue = &pt->queues.queue_array[queue_nr];
ptq = queue->priv;
intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
queue_nr, pt->heap.heap_array[0].ordinal,
timestamp);
auxtrace_heap__pop(&pt->heap);
if (pt->heap.heap_cnt) {
ts = pt->heap.heap_array[0].ordinal + 1;
if (ts > timestamp)
ts = timestamp;
} else {
ts = timestamp;
}
intel_pt_set_pid_tid_cpu(pt, queue);
ret = intel_pt_run_decoder(ptq, &ts);
if (ret < 0) {
auxtrace_heap__add(&pt->heap, queue_nr, ts);
return ret;
}
if (!ret) {
ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
if (ret < 0)
return ret;
} else {
ptq->on_heap = false;
}
}
return 0;
}
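/*
 * For timeless decoding, run the decoder to completion for every queue that
 * matches @tid (-1 matches all queues).
 */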
static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
u64 time_)
{
struct auxtrace_queues *queues = &pt->queues;
unsigned int i;
u64 ts = 0;
for (i = 0; i < queues->nr_queues; i++) {
struct auxtrace_queue *queue = &pt->queues.queue_array[i];
struct intel_pt_queue *ptq = queue->priv;
if (ptq && (tid == -1 || ptq->tid == tid)) {
ptq->time = time_;
intel_pt_set_pid_tid_cpu(pt, queue);
intel_pt_run_decoder(ptq, &ts);
}
}
return 0;
}
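/* Update the queue's pid, tid, cpu and thread from a sample (timeless decoding) */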
static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
struct auxtrace_queue *queue,
struct perf_sample *sample)
{
struct machine *m = ptq->pt->machine;
ptq->pid = sample->pid;
ptq->tid = sample->tid;
ptq->cpu = queue->cpu;
intel_pt_log("queue %u cpu %d pid %d tid %d\n",
ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
thread__zput(ptq->thread);
if (ptq->tid == -1)
return;
if (ptq->pid == -1) {
ptq->thread = machine__find_thread(m, -1, ptq->tid);
if (ptq->thread)
ptq->pid = thread__pid(ptq->thread);
return;
}
ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
}
static int intel_pt_process_timeless_sample(struct intel_pt *pt,
struct perf_sample *sample)
{
struct auxtrace_queue *queue;
struct intel_pt_queue *ptq;
u64 ts = 0;
queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
if (!queue)
return -EINVAL;
ptq = queue->priv;
if (!ptq)
return 0;
ptq->stop = false;
ptq->time = sample->time;
intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
intel_pt_run_decoder(ptq, &ts);
return 0;
}
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
sample->pid, sample->tid, 0, sample->time,
sample->machine_pid, sample->vcpu);
}
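/*
 * Find the queue for a given CPU.  Queues are usually set up one per CPU, so
 * try the directly-indexed slot first, then search downwards from it, and
 * finally scan the remaining queues.
 */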
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
unsigned i, j;
if (cpu < 0 || !pt->queues.nr_queues)
return NULL;
if ((unsigned)cpu >= pt->queues.nr_queues)
i = pt->queues.nr_queues - 1;
else
i = cpu;
if (pt->queues.queue_array[i].cpu == cpu)
return pt->queues.queue_array[i].priv;
for (j = 0; i > 0; j++) {
if (pt->queues.queue_array[--i].cpu == cpu)
return pt->queues.queue_array[i].priv;
}
for (; j < pt->queues.nr_queues; j++) {
if (pt->queues.queue_array[j].cpu == cpu)
return pt->queues.queue_array[j].priv;
}
return NULL;
}
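/*
 * Handle a context switch in "sync_switch" mode.  Returns 1 if the caller
 * should update the current tid immediately, 0 if the update is deferred
 * until the switch ip is seen, or a negative error code.
 */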
static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
u64 timestamp)
{
struct intel_pt_queue *ptq;
int err;
if (!pt->sync_switch)
return 1;
ptq = intel_pt_cpu_to_ptq(pt, cpu);
if (!ptq || !ptq->sync_switch)
return 1;
switch (ptq->switch_state) {
case INTEL_PT_SS_NOT_TRACING:
break;
case INTEL_PT_SS_UNKNOWN:
case INTEL_PT_SS_TRACING:
ptq->next_tid = tid;
ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
return 0;
case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
if (!ptq->on_heap) {
ptq->timestamp = perf_time_to_tsc(timestamp,
&pt->tc);
err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
ptq->timestamp);
if (err)
return err;
ptq->on_heap = true;
}
ptq->switch_state = INTEL_PT_SS_TRACING;
break;
case INTEL_PT_SS_EXPECTING_SWITCH_IP:
intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
break;
default:
break;
}
ptq->next_tid = -1;
return 1;
}
#ifdef HAVE_LIBTRACEEVENT
static int intel_pt_process_switch(struct intel_pt *pt,
struct perf_sample *sample)
{
pid_t tid;
int cpu, ret;
struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id);
if (evsel != pt->switch_evsel)
return 0;
tid = evsel__intval(evsel, sample, "next_pid");
cpu = sample->cpu;
intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
cpu, tid, sample->time, perf_time_to_tsc(sample->time,
&pt->tc));
ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
if (ret <= 0)
return ret;
return machine__set_current_tid(pt->machine, cpu, -1, tid);
}
#endif /* HAVE_LIBTRACEEVENT */
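/* Handle the "switch in" side of a context switch event */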
static int intel_pt_context_switch_in(struct intel_pt *pt,
struct perf_sample *sample)
{
pid_t pid = sample->pid;
pid_t tid = sample->tid;
int cpu = sample->cpu;
if (pt->sync_switch) {
struct intel_pt_queue *ptq;
ptq = intel_pt_cpu_to_ptq(pt, cpu);
if (ptq && ptq->sync_switch) {
ptq->next_tid = -1;
switch (ptq->switch_state) {
case INTEL_PT_SS_NOT_TRACING:
case INTEL_PT_SS_UNKNOWN:
case INTEL_PT_SS_TRACING:
break;
case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
case INTEL_PT_SS_EXPECTING_SWITCH_IP:
ptq->switch_state = INTEL_PT_SS_TRACING;
break;
default:
break;
}
}
}
/*
* If the current tid has not been updated yet, ensure it is now that
* a "switch in" event has occurred.
*/
if (machine__get_current_tid(pt->machine, cpu) == tid)
return 0;
return machine__set_current_tid(pt->machine, cpu, pid, tid);
}
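/*
 * Context switches reported via guest sideband: sync_switch cannot handle
 * guest machines, so disable it and just track the current tid for the vCPU.
 */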
static int intel_pt_guest_context_switch(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample)
{
bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
struct machines *machines = &pt->session->machines;
struct machine *machine = machines__find(machines, sample->machine_pid);
pt->have_guest_sideband = true;
/*
* sync_switch cannot handle guest machines at present, so just disable
* it.
*/
pt->sync_switch_not_supported = true;
if (pt->sync_switch)
intel_pt_disable_sync_switch(pt);
if (out)
return 0;
if (!machine)
return -EINVAL;
return machine__set_current_tid(machine, sample->vcpu, sample->pid, sample->tid);
}
static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
struct perf_sample *sample)
{
bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
pid_t pid, tid;
int cpu, ret;
if (perf_event__is_guest(event))
return intel_pt_guest_context_switch(pt, event, sample);
cpu = sample->cpu;
if (pt->have_sched_switch == 3) {
if (!out)
return intel_pt_context_switch_in(pt, sample);
if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
pr_err("Expecting CPU-wide context switch event\n");
return -EINVAL;
}
pid = event->context_switch.next_prev_pid;
tid = event->context_switch.next_prev_tid;
} else {
if (out)
return 0;
pid = sample->pid;
tid = sample->tid;
}
perf intel-pt: Fix "context_switch event has no tid" error A context_switch event can have no tid because pids can be detached from a task while the task is still running (in do_exit()). Note this won't happen with per-task contexts because then tracing stops at perf_event_exit_task() If a task with no tid gets preempted, or a dying task gets preempted and its parent releases it, when it subsequently gets switched back in, Intel PT will not be able to determine what task is running and prints an error "context_switch event has no tid". However, it is not really an error because the task is in kernel space and the decoder can continue to decode successfully. Fix by changing the error to be only a logged message, and make allowance for tid == -1. Example: Using 5.9-rc4 with Preemptible Kernel (Low-Latency Desktop) e.g. $ uname -r 5.9.0-rc4 $ grep PREEMPT .config # CONFIG_PREEMPT_NONE is not set # CONFIG_PREEMPT_VOLUNTARY is not set CONFIG_PREEMPT=y CONFIG_PREEMPT_COUNT=y CONFIG_PREEMPTION=y CONFIG_PREEMPT_RCU=y CONFIG_PREEMPT_NOTIFIERS=y CONFIG_DRM_I915_PREEMPT_TIMEOUT=640 CONFIG_DEBUG_PREEMPT=y # CONFIG_PREEMPT_TRACER is not set # CONFIG_PREEMPTIRQ_DELAY_TEST is not set Before: $ cat forkit.c #include <sys/types.h> #include <unistd.h> #include <sys/wait.h> int main() { pid_t child; int status = 0; child = fork(); if (child == 0) return 123; wait(&status); return 0; } $ gcc -o forkit forkit.c $ sudo ~/bin/perf record --kcore -a -m,64M -e intel_pt/cyc/k & [1] 11016 $ taskset 2 ./forkit $ sudo pkill perf $ [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 17.262 MB perf.data ] [1]+ Terminated sudo ~/bin/perf record --kcore -a -m,64M -e intel_pt/cyc/k $ sudo ~/bin/perf script --show-task-events --show-switch-events --itrace=iqqe-o -C 1 --ns | grep -C 2 forkit context_switch event has no tid taskset 11019 [001] 66663.270045029: 1 instructions:k: ffffffffb1d9f844 strnlen_user+0xb4 ([kernel.kallsyms]) taskset 11019 [001] 66663.270201816: 1 instructions:k: ffffffffb1a83121 unmap_page_range+0x561 ([kernel.kallsyms]) forkit 11019 [001] 66663.270327553: PERF_RECORD_COMM exec: forkit:11019/11019 forkit 11019 [001] 66663.270420028: 1 instructions:k: ffffffffb1db9537 __clear_user+0x27 ([kernel.kallsyms]) forkit 11019 [001] 66663.270648704: 1 instructions:k: ffffffffb18829e6 do_user_addr_fault+0xf6 ([kernel.kallsyms]) forkit 11019 [001] 66663.270833163: 1 instructions:k: ffffffffb230a825 irqentry_exit_to_user_mode+0x15 ([kernel.kallsyms]) forkit 11019 [001] 66663.271092359: 1 instructions:k: ffffffffb1aea3d9 lock_page_memcg+0x9 ([kernel.kallsyms]) forkit 11019 [001] 66663.271207092: PERF_RECORD_FORK(11020:11020):(11019:11019) forkit 11019 [001] 66663.271234775: PERF_RECORD_SWITCH_CPU_WIDE OUT next pid/tid: 11020/11020 forkit 11020 [001] 66663.271238407: PERF_RECORD_SWITCH_CPU_WIDE IN prev pid/tid: 11019/11019 forkit 11020 [001] 66663.271312066: 1 instructions:k: ffffffffb1a88140 handle_mm_fault+0x10 ([kernel.kallsyms]) forkit 11020 [001] 66663.271476225: PERF_RECORD_EXIT(11020:11020):(11019:11019) forkit 11020 [001] 66663.271497488: PERF_RECORD_SWITCH_CPU_WIDE OUT preempt next pid/tid: 11019/11019 forkit 11019 [001] 66663.271500523: PERF_RECORD_SWITCH_CPU_WIDE IN prev pid/tid: 11020/11020 forkit 11019 [001] 66663.271517241: 1 instructions:k: ffffffffb24012cd error_entry+0x6d ([kernel.kallsyms]) forkit 11019 [001] 66663.271664080: PERF_RECORD_EXIT(11019:11019):(1386:1386) After: $ sudo ~/bin/perf script --show-task-events --show-switch-events --itrace=iqqe-o -C 1 --ns | grep 
-C 2 forkit taskset 11019 [001] 66663.270045029: 1 instructions:k: ffffffffb1d9f844 strnlen_user+0xb4 ([kernel.kallsyms]) taskset 11019 [001] 66663.270201816: 1 instructions:k: ffffffffb1a83121 unmap_page_range+0x561 ([kernel.kallsyms]) forkit 11019 [001] 66663.270327553: PERF_RECORD_COMM exec: forkit:11019/11019 forkit 11019 [001] 66663.270420028: 1 instructions:k: ffffffffb1db9537 __clear_user+0x27 ([kernel.kallsyms]) forkit 11019 [001] 66663.270648704: 1 instructions:k: ffffffffb18829e6 do_user_addr_fault+0xf6 ([kernel.kallsyms]) forkit 11019 [001] 66663.270833163: 1 instructions:k: ffffffffb230a825 irqentry_exit_to_user_mode+0x15 ([kernel.kallsyms]) forkit 11019 [001] 66663.271092359: 1 instructions:k: ffffffffb1aea3d9 lock_page_memcg+0x9 ([kernel.kallsyms]) forkit 11019 [001] 66663.271207092: PERF_RECORD_FORK(11020:11020):(11019:11019) forkit 11019 [001] 66663.271234775: PERF_RECORD_SWITCH_CPU_WIDE OUT next pid/tid: 11020/11020 forkit 11020 [001] 66663.271238407: PERF_RECORD_SWITCH_CPU_WIDE IN prev pid/tid: 11019/11019 forkit 11020 [001] 66663.271312066: 1 instructions:k: ffffffffb1a88140 handle_mm_fault+0x10 ([kernel.kallsyms]) forkit 11020 [001] 66663.271476225: PERF_RECORD_EXIT(11020:11020):(11019:11019) forkit 11020 [001] 66663.271497488: PERF_RECORD_SWITCH_CPU_WIDE OUT preempt next pid/tid: 11019/11019 forkit 11019 [001] 66663.271500523: PERF_RECORD_SWITCH_CPU_WIDE IN prev pid/tid: 11020/11020 forkit 11019 [001] 66663.271517241: 1 instructions:k: ffffffffb24012cd error_entry+0x6d ([kernel.kallsyms]) forkit 11019 [001] 66663.271664080: PERF_RECORD_EXIT(11019:11019):(1386:1386) forkit 11019 [001] 66663.271688752: PERF_RECORD_SWITCH_CPU_WIDE OUT next pid/tid: -1/-1 :-1 -1 [001] 66663.271692086: PERF_RECORD_SWITCH_CPU_WIDE IN prev pid/tid: 11019/11019 :-1 -1 [001] 66663.271707466: 1 instructions:k: ffffffffb18eb096 update_load_avg+0x306 ([kernel.kallsyms]) Fixes: 86c2786994bd7c ("perf intel-pt: Add support for PERF_RECORD_SWITCH") Signed-off-by: Adrian Hunter <adrian.hunter@intel.com> Cc: Andi Kleen <ak@linux.intel.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Yu-cheng Yu <yu-cheng.yu@intel.com> Link: http://lore.kernel.org/lkml/20200909084923.9096-3-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2020-09-09 16:49:23 +08:00
if (tid == -1)
intel_pt_log("context_switch event has no tid\n");
ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
if (ret <= 0)
return ret;
return machine__set_current_tid(pt->machine, cpu, pid, tid);
}
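/*
 * Record which task was running when tracing started on a CPU (only needed
 * for per-cpu mmaps).
 */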
static int intel_pt_process_itrace_start(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample)
{
if (!pt->per_cpu_mmaps)
return 0;
intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
sample->cpu, event->itrace_start.pid,
event->itrace_start.tid, sample->time,
perf_time_to_tsc(sample->time, &pt->tc));
return machine__set_current_tid(pt->machine, sample->cpu,
event->itrace_start.pid,
event->itrace_start.tid);
}
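/*
 * Associate an AUX output hardware ID with the evsel and sample ID that will
 * be used when synthesizing samples from PEBS records found in the trace.
 */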
static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample)
{
u64 hw_id = event->aux_output_hw_id.hw_id;
struct auxtrace_queue *queue;
struct intel_pt_queue *ptq;
struct evsel *evsel;
queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
evsel = evlist__id2evsel_strict(pt->session->evlist, sample->id);
if (!queue || !queue->priv || !evsel || hw_id > INTEL_PT_MAX_PEBS) {
pr_err("Bad AUX output hardware ID\n");
return -EINVAL;
}
ptq = queue->priv;
ptq->pebs[hw_id].evsel = evsel;
ptq->pebs[hw_id].id = sample->id;
return 0;
}
perf intel-pt: Add support for text poke events Select text poke events when available and the kernel is being traced. Process text poke events to invalidate entries in Intel PT's instruction cache. Example: The example requires kernel config: CONFIG_PROC_SYSCTL=y CONFIG_SCHED_DEBUG=y CONFIG_SCHEDSTATS=y Before: # perf record -o perf.data.before --kcore -a -e intel_pt//k -m,64M & # cat /proc/sys/kernel/sched_schedstats 0 # echo 1 > /proc/sys/kernel/sched_schedstats # cat /proc/sys/kernel/sched_schedstats 1 # echo 0 > /proc/sys/kernel/sched_schedstats # cat /proc/sys/kernel/sched_schedstats 0 # kill %1 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 3.341 MB perf.data.before ] [1]+ Terminated perf record -o perf.data.before --kcore -a -e intel_pt//k -m,64M # perf script -i perf.data.before --itrace=e >/dev/null Warning: 474 instruction trace errors After: # perf record -o perf.data.after --kcore -a -e intel_pt//k -m,64M & # cat /proc/sys/kernel/sched_schedstats 0 # echo 1 > /proc/sys/kernel/sched_schedstats # cat /proc/sys/kernel/sched_schedstats 1 # echo 0 > /proc/sys/kernel/sched_schedstats # cat /proc/sys/kernel/sched_schedstats 0 # kill %1 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 2.646 MB perf.data.after ] [1]+ Terminated perf record -o perf.data.after --kcore -a -e intel_pt//k -m,64M # perf script -i perf.data.after --itrace=e >/dev/null Example: The example requires kernel config: # CONFIG_FUNCTION_TRACER is not set Before: # perf record --kcore -m,64M -o t1 -a -e intel_pt//k & # perf probe __schedule Added new event: probe:__schedule (on __schedule) You can now use it in all perf tools, such as: perf record -e probe:__schedule -aR sleep 1 # perf record -e probe:__schedule -aR sleep 1 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.026 MB perf.data (68 samples) ] # perf probe -d probe:__schedule Removed event: probe:__schedule # kill %1 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 41.268 MB t1 ] [1]+ Terminated perf record --kcore -m,64M -o t1 -a -e intel_pt//k # perf script -i t1 --itrace=e >/dev/null Warning: 207 instruction trace errors After: # perf record --kcore -m,64M -o t1 -a -e intel_pt//k & # perf probe __schedule Added new event: probe:__schedule (on __schedule) You can now use it in all perf tools, such as: perf record -e probe:__schedule -aR sleep 1 # perf record -e probe:__schedule -aR sleep 1 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.028 MB perf.data (107 samples) ] # perf probe -d probe:__schedule Removed event: probe:__schedule # kill %1 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 39.978 MB t1 ] [1]+ Terminated perf record --kcore -m,64M -o t1 -a -e intel_pt//k # perf script -i t1 --itrace=e >/dev/null # perf script -i t1 --no-itrace -D | grep 'POKE\|KSYMBOL' 6 565303693547 0x291f18 [0x50]: PERF_RECORD_KSYMBOL addr ffffffffc027a000 len 4096 type 2 flags 0x0 name kprobe_insn_page 6 565303697010 0x291f68 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffffc027a000 old len 0 new len 6 6 565303838278 0x291fa8 [0x50]: PERF_RECORD_KSYMBOL addr ffffffffc027c000 len 4096 type 2 flags 0x0 name kprobe_optinsn_page 6 565303848286 0x291ff8 [0xa0]: PERF_RECORD_TEXT_POKE addr 0xffffffffc027c000 old len 0 new len 106 6 565369336743 0x292af8 [0x40]: PERF_RECORD_TEXT_POKE addr 0xffffffff88ab8890 old len 5 new len 5 7 566434327704 0x217c208 [0x40]: 
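/*
 * Find the map containing 'addr' for 'thread', reusing the map already in
 * 'al' when it still covers the address.
 */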
static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
if (!al->map || addr < map__start(al->map) || addr >= map__end(al->map)) {
if (!thread__find_map(thread, cpumode, addr, al))
return -1;
}
return 0;
}
/* Invalidate all instruction cache entries that overlap the text poke */
static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
{
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
/* Assume text poke begins in a basic block no more than 4096 bytes */
int cnt = 4096 + event->text_poke.new_len;
struct thread *thread = pt->unknown_thread;
struct addr_location al;
struct machine *machine = pt->machine;
struct intel_pt_cache_entry *e;
u64 offset;
int ret = 0;
addr_location__init(&al);
if (!event->text_poke.new_len)
goto out;
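/*
 * Walk backwards from the last byte of the new text, covering the poked
 * range plus up to one preceding basic block (assumed to be no more than
 * 4096 bytes), so that every cached instruction that could overlap the
 * poke is visited and invalidated.
 */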
for (; cnt; cnt--, addr--) {
struct dso *dso;
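/*
 * Text poke handling: for each candidate address, look up the map so
 * that any cached basic block overlapping the poked bytes can be found
 * and invalidated. If there is no map and the address is already below
 * the poked address, the backwards scan is done.
 */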
if (intel_pt_find_map(thread, cpumode, addr, &al)) {
if (addr < event->text_poke.addr)
goto out;
continue;
}
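/* Nothing to invalidate unless the DSO has an auxtrace cache */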
dso = map__dso(al.map);
if (!dso || !dso->auxtrace_cache)
continue;
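/* The cache is keyed by the address relative to the DSO */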
offset = map__map_ip(al.map, addr);
e = intel_pt_cache_lookup(dso, machine, offset);
if (!e)
continue;
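/*
 * addr + e->byte_cnt + e->length is the end of the cached block that
 * starts at addr, so if it does not reach the poked address, the block
 * and the text poke do not overlap.
 */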
if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
/*
* No overlap. Working backwards there cannot be another
* basic block that overlaps the text poke if there is a
* branch instruction before the text poke address.
*/
if (e->branch != INTEL_PT_BR_NO_BRANCH)
goto out;
} else {
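/* The cached block overlaps the poked bytes: drop it so it is re-decoded */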
intel_pt_cache_invalidate(dso, machine, offset);
intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
dso->long_name, addr);
}
}
out:
addr_location__exit(&al);
return ret;
}
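/*
 * Note: a descriptive sketch, not text from the original source.  This is
 * the auxtrace ->process_event callback for Intel PT.  It is called for
 * every ordered perf event in the session: it derives a TSC timestamp from
 * the sample time, keeps the decode queues up to date, and then dispatches
 * on the event type (task exit, samples needing synthesized callchains or
 * branch stacks, truncated AUX data, itrace start, AUX output hardware ID,
 * context switches and text pokes).
 */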
static int intel_pt_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
u64 timestamp;
int err = 0;
if (dump_trace)
return 0;
if (!tool->ordered_events) {
pr_err("Intel Processor Trace requires ordered events\n");
return -EINVAL;
}
if (sample->time && sample->time != (u64)-1)
timestamp = perf_time_to_tsc(sample->time, &pt->tc);
else
timestamp = 0;
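/*
 * Note: perf_time_to_tsc() maps the sample's perf_clock time back to a TSC
 * value using the conversion parameters captured in pt->tc.  A simplified
 * sketch of the inverse conversion, ignoring the quotient/remainder split
 * the real helper uses to avoid overflow:
 *
 *	tsc ~= ((time - time_zero) << time_shift) / time_mult
 */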
if (timestamp || pt->timeless_decoding) {
err = intel_pt_update_queues(pt);
if (err)
return err;
}
if (pt->timeless_decoding) {
if (pt->sampling_mode) {
if (sample->aux_sample.size)
err = intel_pt_process_timeless_sample(pt,
sample);
} else if (event->header.type == PERF_RECORD_EXIT) {
err = intel_pt_process_timeless_queues(pt,
event->fork.tid,
sample->time);
}
} else if (timestamp) {
if (!pt->first_timestamp)
intel_pt_first_timestamp(pt, timestamp);
err = intel_pt_process_queues(pt, timestamp);
}
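/*
 * Note: the branches above select between the two decoding modes.  With
 * timeless decoding (no usable timestamps) a queue is drained when its task
 * exits, or on demand for AUX area samples; otherwise queues are processed
 * in timestamp order up to the current event's time.
 */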
if (err)
return err;
if (event->header.type == PERF_RECORD_SAMPLE) {
if (pt->synth_opts.add_callchain && !sample->callchain)
intel_pt_add_callchain(pt, sample);
if (pt->synth_opts.add_last_branch && !sample->branch_stack)
intel_pt_add_br_stack(pt, sample);
}
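/*
 * Note: for regular (non-synthesized) samples, the calls above honour the
 * --itrace options that ask for a callchain or branch stack to be attached,
 * reconstructed late from the thread stack the decoder has been maintaining.
 */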
if (event->header.type == PERF_RECORD_AUX &&
(event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
pt->synth_opts.errors) {
err = intel_pt_lost(pt, sample);
if (err)
return err;
}
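/*
 * Note: PERF_AUX_FLAG_TRUNCATED on an AUX record means trace data was
 * dropped, typically because the AUX buffer filled up.  The block above
 * surfaces that as a synthesized error event when --itrace=e is in effect.
 */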
#ifdef HAVE_LIBTRACEEVENT
if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
err = intel_pt_process_switch(pt, sample);
else
#endif
if (event->header.type == PERF_RECORD_ITRACE_START)
err = intel_pt_process_itrace_start(pt, event, sample);
else if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID)
err = intel_pt_process_aux_output_hw_id(pt, event, sample);
else if (event->header.type == PERF_RECORD_SWITCH ||
event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
err = intel_pt_context_switch(pt, event, sample);
if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
err = intel_pt_text_poke(pt, event);
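/*
 * Note: PERF_RECORD_TEXT_POKE reports kernel text being patched at runtime
 * (e.g. kprobes, ftrace trampolines).  intel_pt_text_poke() invalidates any
 * cached decoded instructions overlapping the patched bytes so the decoder
 * re-reads the updated code.
 */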
if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
event->header.type, sample->cpu, sample->time, timestamp);
intel_pt_log_event(event);
}
return err;
}
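/*
 * Note: the ->flush_events callback, run when there are no more events to
 * deliver.  Any trace data still queued is decoded now, using MAX_TIMESTAMP
 * so that nothing is held back waiting for a later timestamp.
 */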
static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
int ret;
if (dump_trace)
return 0;
if (!tool->ordered_events)
return -EINVAL;
ret = intel_pt_update_queues(pt);
if (ret < 0)
return ret;
if (pt->timeless_decoding)
return intel_pt_process_timeless_queues(pt, -1,
MAX_TIMESTAMP - 1);
return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}
static void intel_pt_free_events(struct perf_session *session)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
struct auxtrace_queues *queues = &pt->queues;
unsigned int i;
for (i = 0; i < queues->nr_queues; i++) {
intel_pt_free_queue(queues->queue_array[i].priv);
queues->queue_array[i].priv = NULL;
}
intel_pt_log_disable();
auxtrace_queues__free(queues);
}
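/*
 * Note: the ->free callback tears down the whole Intel PT auxtrace instance:
 * the timestamp heap and the queues (per-queue decoder state is released via
 * intel_pt_free_events()), the VMCS info, the address filters and the
 * remaining per-session allocations.
 */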
static void intel_pt_free(struct perf_session *session)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
auxtrace_heap__free(&pt->heap);
intel_pt_free_events(session);
session->auxtrace = NULL;
intel_pt_free_vmcs_info(pt);
thread__put(pt->unknown_thread);
addr_filters__exit(&pt->filts);
zfree(&pt->chain);
zfree(&pt->filter);
zfree(&pt->time_ranges);
zfree(&pt->br_stack);
free(pt);
}
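/*
 * Report whether the given evsel is the Intel PT event itself, by comparing
 * its attribute type against the Intel PT PMU type recorded at setup time.
 */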
static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
struct evsel *evsel)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
return evsel->core.attr.type == pt->pmu_type;
}
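/*
 * Queue the data of an AUXTRACE event. For pipe input the data follows the
 * event at offset 0; for file input the current file offset locates it. In
 * dump_trace mode the buffer is mapped and dumped immediately after queuing.
 */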
static int intel_pt_process_auxtrace_event(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool __maybe_unused)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
if (!pt->data_queued) {
struct auxtrace_buffer *buffer;
off_t data_offset;
int fd = perf_data__fd(session->data);
int err;
if (perf_data__is_pipe(session->data)) {
data_offset = 0;
} else {
data_offset = lseek(fd, 0, SEEK_CUR);
if (data_offset == -1)
return -errno;
}
err = auxtrace_queues__add_event(&pt->queues, session, event,
data_offset, &buffer);
if (err)
return err;
		/* Dump here now that we have copied a piped trace out of the pipe */
if (dump_trace) {
if (auxtrace_buffer__get_data(buffer, fd)) {
intel_pt_dump_event(pt, buffer->data,
buffer->size);
auxtrace_buffer__put_data(buffer);
}
}
}
return 0;
}
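/*
 * Add either an AUXTRACE event (event != NULL) or a sample carrying AUX data
 * to the queues. Sample timestamps are converted from perf time to TSC so
 * they can be ordered against the trace.
 */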
static int intel_pt_queue_data(struct perf_session *session,
struct perf_sample *sample,
union perf_event *event, u64 data_offset)
{
struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
auxtrace);
u64 timestamp;
if (event) {
return auxtrace_queues__add_event(&pt->queues, session, event,
data_offset, NULL);
}
if (sample->time && sample->time != (u64)-1)
timestamp = perf_time_to_tsc(sample->time, &pt->tc);
else
timestamp = 0;
return auxtrace_queues__add_sample(&pt->queues, session, sample,
data_offset, timestamp);
}
struct intel_pt_synth {
struct perf_tool dummy_tool;
struct perf_session *session;
};
static int intel_pt_event_synth(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct intel_pt_synth *intel_pt_synth =
container_of(tool, struct intel_pt_synth, dummy_tool);
return perf_session__deliver_synth_event(intel_pt_synth->session, event,
NULL);
}
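/*
 * Synthesize an attribute event describing a new synthesized event type and
 * deliver it to the session via the dummy tool above.
 */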
static int intel_pt_synth_event(struct perf_session *session, const char *name,
struct perf_event_attr *attr, u64 id)
{
struct intel_pt_synth intel_pt_synth;
int err;
pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
name, id, (u64)attr->sample_type);
memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
intel_pt_synth.session = session;
err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
&id, intel_pt_event_synth);
if (err)
pr_err("%s: failed to synthesize '%s' event type\n",
__func__, name);
return err;
}
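/* Rename the evsel whose first sample ID matches the given id */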
static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
const char *name)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.id && evsel->core.id[0] == id) {
if (evsel->name)
zfree(&evsel->name);
evsel->name = strdup(name);
break;
}
}
}
static struct evsel *intel_pt_evsel(struct intel_pt *pt,
struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
return evsel;
}
return NULL;
}
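/*
 * Set up the events that will be synthesized from the trace. Their sample
 * attributes are derived from the Intel PT evsel, with a new sample ID
 * allocated for each synthesized event type.
 */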
static int intel_pt_synth_events(struct intel_pt *pt,
struct perf_session *session)
{
struct evlist *evlist = session->evlist;
struct evsel *evsel = intel_pt_evsel(pt, evlist);
struct perf_event_attr attr;
u64 id;
int err;
if (!evsel) {
pr_debug("There are no selected events with Intel Processor Trace data\n");
return 0;
}
memset(&attr, 0, sizeof(struct perf_event_attr));
attr.size = sizeof(struct perf_event_attr);
attr.type = PERF_TYPE_HARDWARE;
attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
PERF_SAMPLE_PERIOD;
if (pt->timeless_decoding)
attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
else
attr.sample_type |= PERF_SAMPLE_TIME;
if (!pt->per_cpu_mmaps)
attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
attr.exclude_user = evsel->core.attr.exclude_user;
attr.exclude_kernel = evsel->core.attr.exclude_kernel;
attr.exclude_hv = evsel->core.attr.exclude_hv;
attr.exclude_host = evsel->core.attr.exclude_host;
attr.exclude_guest = evsel->core.attr.exclude_guest;
attr.sample_id_all = evsel->core.attr.sample_id_all;
attr.read_format = evsel->core.attr.read_format;
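	/* Base synthesized IDs on the Intel PT evsel's first ID, offset to keep clear of existing IDs */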
id = evsel->core.id[0] + 1000000000;
if (!id)
id = 1;
if (pt->synth_opts.branches) {
attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
attr.sample_period = 1;
attr.sample_type |= PERF_SAMPLE_ADDR;
err = intel_pt_synth_event(session, "branches", &attr, id);
if (err)
return err;
pt->sample_branches = true;
pt->branches_sample_type = attr.sample_type;
pt->branches_id = id;
id += 1;
attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
}
if (pt->synth_opts.callchain)
attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
if (pt->synth_opts.last_branch) {
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
/*
* We don't use the hardware index, but the sample generation
* code uses the new format branch_stack with this field,
* so the event attributes must indicate that it's present.
*/
attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
}
if (pt->synth_opts.instructions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
attr.sample_period =
intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
else
attr.sample_period = pt->synth_opts.period;
err = intel_pt_synth_event(session, "instructions", &attr, id);
if (err)
return err;
pt->sample_instructions = true;
pt->instructions_sample_type = attr.sample_type;
pt->instructions_id = id;
id += 1;
}
if (pt->synth_opts.cycles) {
attr.config = PERF_COUNT_HW_CPU_CYCLES;
if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
attr.sample_period =
intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
else
attr.sample_period = pt->synth_opts.period;
err = intel_pt_synth_event(session, "cycles", &attr, id);
if (err)
return err;
pt->sample_cycles = true;
pt->cycles_sample_type = attr.sample_type;
pt->cycles_id = id;
id += 1;
}
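	/* The remaining synthesized events are not period-based */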
attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
attr.sample_period = 1;
if (pt->synth_opts.transactions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
err = intel_pt_synth_event(session, "transactions", &attr, id);
if (err)
return err;
pt->sample_transactions = true;
pt->transactions_sample_type = attr.sample_type;
pt->transactions_id = id;
intel_pt_set_event_name(evlist, id, "transactions");
id += 1;
}
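	/* Events from here on use the synthetic PMU type and carry raw data */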
attr.type = PERF_TYPE_SYNTH;
attr.sample_type |= PERF_SAMPLE_RAW;
if (pt->synth_opts.ptwrites) {
attr.config = PERF_SYNTH_INTEL_PTWRITE;
err = intel_pt_synth_event(session, "ptwrite", &attr, id);
if (err)
return err;
pt->sample_ptwrites = true;
pt->ptwrites_sample_type = attr.sample_type;
pt->ptwrites_id = id;
intel_pt_set_event_name(evlist, id, "ptwrite");
id += 1;
}
if (pt->synth_opts.pwr_events) {
pt->sample_pwr_events = true;
pt->pwr_events_sample_type = attr.sample_type;
attr.config = PERF_SYNTH_INTEL_CBR;
err = intel_pt_synth_event(session, "cbr", &attr, id);
if (err)
return err;
pt->cbr_id = id;
intel_pt_set_event_name(evlist, id, "cbr");
id += 1;
attr.config = PERF_SYNTH_INTEL_PSB;
err = intel_pt_synth_event(session, "psb", &attr, id);
if (err)
return err;
pt->psb_id = id;
intel_pt_set_event_name(evlist, id, "psb");
id += 1;
}
if (pt->synth_opts.pwr_events && (evsel->core.attr.config & INTEL_PT_CFG_PWR_EVT_EN)) {
attr.config = PERF_SYNTH_INTEL_MWAIT;
err = intel_pt_synth_event(session, "mwait", &attr, id);
if (err)
return err;
pt->mwait_id = id;
intel_pt_set_event_name(evlist, id, "mwait");
id += 1;
attr.config = PERF_SYNTH_INTEL_PWRE;
err = intel_pt_synth_event(session, "pwre", &attr, id);
if (err)
return err;
pt->pwre_id = id;
intel_pt_set_event_name(evlist, id, "pwre");
id += 1;
attr.config = PERF_SYNTH_INTEL_EXSTOP;
err = intel_pt_synth_event(session, "exstop", &attr, id);
if (err)
return err;
pt->exstop_id = id;
intel_pt_set_event_name(evlist, id, "exstop");
id += 1;
attr.config = PERF_SYNTH_INTEL_PWRX;
err = intel_pt_synth_event(session, "pwrx", &attr, id);
if (err)
return err;
pt->pwrx_id = id;
intel_pt_set_event_name(evlist, id, "pwrx");
id += 1;
}
if (pt->synth_opts.intr_events && (evsel->core.attr.config & INTEL_PT_CFG_EVT_EN)) {
attr.config = PERF_SYNTH_INTEL_EVT;
err = intel_pt_synth_event(session, "evt", &attr, id);
if (err)
return err;
pt->evt_sample_type = attr.sample_type;
pt->evt_id = id;
intel_pt_set_event_name(evlist, id, "evt");
id += 1;
}
if (pt->synth_opts.intr_events && pt->cap_event_trace) {
attr.config = PERF_SYNTH_INTEL_IFLAG_CHG;
err = intel_pt_synth_event(session, "iflag", &attr, id);
if (err)
return err;
pt->iflag_chg_sample_type = attr.sample_type;
pt->iflag_chg_id = id;
intel_pt_set_event_name(evlist, id, "iflag");
id += 1;
}
return 0;
}
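/*
 * Note which evsel has aux_output set (PEBS via Intel PT). If there is exactly
 * one, it is remembered in pt->pebs_evsel; if there is more than one,
 * single_pebs is cleared so samples are matched by sample ID instead.
 */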
static void intel_pt_setup_pebs_events(struct intel_pt *pt)
{
struct evsel *evsel;
if (!pt->synth_opts.other_events)
return;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (evsel->core.attr.aux_output && evsel->core.id) {
if (pt->single_pebs) {
pt->single_pebs = false;
return;
}
pt->single_pebs = true;
pt->sample_pebs = true;
pt->pebs_evsel = evsel;
}
}
}
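/* Find the sched:sched_switch tracepoint evsel, searching newest first */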
static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry_reverse(evlist, evsel) {
const char *name = evsel__name(evsel);
if (!strcmp(name, "sched:sched_switch"))
return evsel;
}
return NULL;
}
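/* Return true if any evsel requested context switch events (attr.context_switch) */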
static bool intel_pt_find_switch(struct evlist *evlist)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.context_switch)
return true;
}
return false;
}
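/* Handle perf config variables from the intel-pt section, e.g. intel-pt.mispred-all */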
static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
struct intel_pt *pt = data;
if (!strcmp(var, "intel-pt.mispred-all"))
pt->mispred_all = perf_config_bool(var, value);
if (!strcmp(var, "intel-pt.max-loops"))
perf_config_int(&pt->max_loops, var, value);
return 0;
}
/* Find least TSC which converts to ns or later */
static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
{
u64 tsc, tm;
tsc = perf_time_to_tsc(ns, &pt->tc);
while (1) {
tm = tsc_to_perf_time(tsc, &pt->tc);
if (tm < ns)
break;
tsc -= 1;
}
while (tm < ns)
tm = tsc_to_perf_time(++tsc, &pt->tc);
return tsc;
}
/* Find greatest TSC which converts to ns or earlier */
static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
{
u64 tsc, tm;
tsc = perf_time_to_tsc(ns, &pt->tc);
while (1) {
tm = tsc_to_perf_time(tsc, &pt->tc);
if (tm > ns)
break;
tsc += 1;
}
while (tm > ns)
tm = tsc_to_perf_time(--tsc, &pt->tc);
return tsc;
}
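/*
 * Convert the requested perf-time ranges to TSC values so the decoder can
 * filter on them. A zero start or end leaves the range open at that end.
 */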
static int intel_pt_setup_time_ranges(struct intel_pt *pt,
struct itrace_synth_opts *opts)
{
struct perf_time_interval *p = opts->ptime_range;
int n = opts->range_num;
int i;
if (!n || !p || pt->timeless_decoding)
return 0;
pt->time_ranges = calloc(n, sizeof(struct range));
if (!pt->time_ranges)
return -ENOMEM;
pt->range_cnt = n;
intel_pt_log("%s: %u range(s)\n", __func__, n);
for (i = 0; i < n; i++) {
struct range *r = &pt->time_ranges[i];
u64 ts = p[i].start;
u64 te = p[i].end;
/*
* Take care to ensure the TSC range matches the perf-time range
* when converted back to perf-time.
*/
r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
r->end = te ? intel_pt_tsc_end(te, pt) : 0;
intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
i, ts, te);
intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
i, r->start, r->end);
}
return 0;
}
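/*
 * Parse one VM Time Correlation argument of the form
 * <tsc_offset>[:<vmcs>[,<vmcs>]...], recording either the default TSC offset
 * or the offset for each listed VMCS address.
 */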
static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
{
struct intel_pt_vmcs_info *vmcs_info;
u64 tsc_offset, vmcs;
char *p = *args;
errno = 0;
p = skip_spaces(p);
if (!*p)
return 1;
tsc_offset = strtoull(p, &p, 0);
if (errno)
return -errno;
p = skip_spaces(p);
if (*p != ':') {
pt->dflt_tsc_offset = tsc_offset;
*args = p;
return 0;
}
p += 1;
while (1) {
vmcs = strtoull(p, &p, 0);
if (errno)
return -errno;
if (!vmcs)
return -EINVAL;
vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset);
if (!vmcs_info)
return -ENOMEM;
p = skip_spaces(p);
if (*p != ',')
break;
p += 1;
}
*args = p;
return 0;
}
static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt)
{
char *args = pt->synth_opts.vm_tm_corr_args;
int ret;
if (!args)
return 0;
do {
ret = intel_pt_parse_vm_tm_corr_arg(pt, &args);
} while (!ret);
if (ret < 0) {
pr_err("Failed to parse VM Time Correlation options\n");
return ret;
}
return 0;
}
static const char * const intel_pt_info_fmts[] = {
[INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
[INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
	[INTEL_PT_TIME_MULT] = " Time Multiplier %"PRIu64"\n",
[INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n",
[INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n",
[INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n",
[INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n",
[INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n",
[INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
[INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
[INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
[INTEL_PT_MTC_FREQ_BITS] = " MTC freq bits %#"PRIx64"\n",
[INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
[INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
[INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
[INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n",
[INTEL_PT_FILTER_STR_LEN] = " Filter string len. %"PRIu64"\n",
};
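/* Print the auxtrace info fields above when dumping the trace (dump_trace) */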
static void intel_pt_print_info(__u64 *arr, int start, int finish)
{
int i;
if (!dump_trace)
return;
for (i = start; i <= finish; i++) {
const char *fmt = intel_pt_info_fmts[i];
if (fmt)
fprintf(stdout, fmt, arr[i]);
}
}
static void intel_pt_print_info_str(const char *name, const char *str)
{
if (!dump_trace)
return;
fprintf(stdout, " %-20s%s\n", name, str ? str : "");
}
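/* Check that the auxtrace info event is large enough to contain priv[pos] */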
static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
{
return auxtrace_info->header.size >=
sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
}
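/*
 * Process the PERF_RECORD_AUXTRACE_INFO event: allocate and initialize
 * struct intel_pt from the recorded parameters, set up synthesized events,
 * and hook the auxtrace callbacks into the session.
 */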
int intel_pt_process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
struct intel_pt *pt;
void *info_end;
__u64 *info;
int err;
if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
min_sz)
return -EINVAL;
pt = zalloc(sizeof(struct intel_pt));
if (!pt)
return -ENOMEM;
pt->vmcs_info = RB_ROOT;
addr_filters__init(&pt->filts);
err = perf_config(intel_pt_perf_config, pt);
if (err)
goto err_free;
err = auxtrace_queues__init(&pt->queues);
if (err)
goto err_free;
if (session->itrace_synth_opts->set) {
pt->synth_opts = *session->itrace_synth_opts;
} else {
struct itrace_synth_opts *opts = session->itrace_synth_opts;
itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample);
if (!opts->default_no_sample && !opts->inject) {
pt->synth_opts.branches = false;
pt->synth_opts.callchain = true;
pt->synth_opts.add_callchain = true;
}
pt->synth_opts.thread_stack = opts->thread_stack;
}
if (!(pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT))
intel_pt_log_set_name(INTEL_PT_PMU_NAME);
pt->session = session;
pt->machine = &session->machines.host; /* No kvm support */
pt->auxtrace_type = auxtrace_info->type;
pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
INTEL_PT_PER_CPU_MMAPS);
if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
INTEL_PT_CYC_BIT);
}
if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
pt->max_non_turbo_ratio =
auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
intel_pt_print_info(&auxtrace_info->priv[0],
INTEL_PT_MAX_NONTURBO_RATIO,
INTEL_PT_MAX_NONTURBO_RATIO);
}
info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
info_end = (void *)auxtrace_info + auxtrace_info->header.size;
if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
size_t len;
len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
intel_pt_print_info(&auxtrace_info->priv[0],
INTEL_PT_FILTER_STR_LEN,
INTEL_PT_FILTER_STR_LEN);
if (len) {
const char *filter = (const char *)info;
len = roundup(len + 1, 8);
info += len >> 3;
if ((void *)info > info_end) {
pr_err("%s: bad filter string length\n", __func__);
err = -EINVAL;
goto err_free_queues;
}
pt->filter = memdup(filter, len);
if (!pt->filter) {
err = -ENOMEM;
goto err_free_queues;
}
if (session->header.needs_swap)
mem_bswap_64(pt->filter, len);
if (pt->filter[len - 1]) {
pr_err("%s: filter string not null terminated\n", __func__);
err = -EINVAL;
goto err_free_queues;
}
err = addr_filters__parse_bare_filter(&pt->filts,
filter);
if (err)
goto err_free_queues;
}
intel_pt_print_info_str("Filter string", pt->filter);
}
if ((void *)info < info_end) {
pt->cap_event_trace = *info++;
if (dump_trace)
fprintf(stdout, " Cap Event Trace %d\n",
pt->cap_event_trace);
}
pt->timeless_decoding = intel_pt_timeless_decoding(pt);
if (pt->timeless_decoding && !pt->tc.time_mult)
pt->tc.time_mult = 1;
pt->have_tsc = intel_pt_have_tsc(pt);
pt->sampling_mode = intel_pt_sampling_mode(pt);
pt->est_tsc = !pt->timeless_decoding;
if (pt->synth_opts.vm_time_correlation) {
if (pt->timeless_decoding) {
pr_err("Intel PT has no time information for VM Time Correlation\n");
err = -EINVAL;
goto err_free_queues;
}
if (session->itrace_synth_opts->ptime_range) {
pr_err("Time ranges cannot be specified with VM Time Correlation\n");
err = -EINVAL;
goto err_free_queues;
}
/* Currently TSC Offset is calculated using MTC packets */
if (!intel_pt_have_mtc(pt)) {
pr_err("MTC packets must have been enabled for VM Time Correlation\n");
err = -EINVAL;
goto err_free_queues;
}
err = intel_pt_parse_vm_tm_corr_args(pt);
if (err)
goto err_free_queues;
}
pt->unknown_thread = thread__new(999999999, 999999999);
if (!pt->unknown_thread) {
err = -ENOMEM;
goto err_free_queues;
}
err = thread__set_comm(pt->unknown_thread, "unknown", 0);
if (err)
goto err_delete_thread;
if (thread__init_maps(pt->unknown_thread, pt->machine)) {
err = -ENOMEM;
goto err_delete_thread;
}
pt->auxtrace.process_event = intel_pt_process_event;
pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
pt->auxtrace.queue_data = intel_pt_queue_data;
pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
pt->auxtrace.flush_events = intel_pt_flush;
pt->auxtrace.free_events = intel_pt_free_events;
pt->auxtrace.free = intel_pt_free;
pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
session->auxtrace = &pt->auxtrace;
if (dump_trace)
return 0;
if (pt->have_sched_switch == 1) {
pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
if (!pt->switch_evsel) {
pr_err("%s: missing sched_switch event\n", __func__);
err = -EINVAL;
goto err_delete_thread;
}
} else if (pt->have_sched_switch == 2 &&
!intel_pt_find_switch(session->evlist)) {
pr_err("%s: missing context_switch attribute flag\n", __func__);
err = -EINVAL;
goto err_delete_thread;
}
if (pt->synth_opts.log) {
bool log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR;
unsigned int log_on_error_size = pt->synth_opts.log_on_error_size;
intel_pt_log_enable(log_on_error, log_on_error_size);
}
/* Maximum non-turbo ratio is TSC freq / 100 MHz */
if (pt->tc.time_mult) {
u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
if (!pt->max_non_turbo_ratio)
pt->max_non_turbo_ratio =
(tsc_freq + 50000000) / 100000000;
intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
intel_pt_log("Maximum non-turbo ratio %u\n",
pt->max_non_turbo_ratio);
pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
}
err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
if (err)
goto err_delete_thread;
if (pt->synth_opts.calls)
pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_TRACE_END;
if (pt->synth_opts.returns)
pt->branches_filter |= PERF_IP_FLAG_RETURN |
PERF_IP_FLAG_TRACE_BEGIN;
if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
!symbol_conf.use_callchain) {
symbol_conf.use_callchain = true;
if (callchain_register_param(&callchain_param) < 0) {
symbol_conf.use_callchain = false;
pt->synth_opts.callchain = false;
pt->synth_opts.add_callchain = false;
}
}
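/*
 * add_callchain means call chains are to be added to regular
 * (non-synthesized) events as well, e.g. perf script --itrace=Ge.
 * Allocate the call chain buffer (pt->chain) used for that; it is
 * freed again on the error path below.
 */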
if (pt->synth_opts.add_callchain) {
err = intel_pt_callchain_init(pt);
if (err)
goto err_delete_thread;
}
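/*
 * Size the branch stacks that will be synthesized ('l' option) or
 * added to regular events ('L' option, e.g. --itrace=Le) from the
 * requested number of last branch entries.
 */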
if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
pt->br_stack_sz = pt->synth_opts.last_branch_sz;
pt->br_stack_sz_plus = pt->br_stack_sz;
}
if (pt->synth_opts.add_last_branch) {
err = intel_pt_br_stack_init(pt);
if (err)
goto err_delete_thread;
/*
* Additional branch stack size to cater for tracing from the
* actual sample ip to where the sample time is recorded.
* Measured at about 200 branches, but generously set to 1024.
* If kernel space is not being traced, then add just 1 for the
* branch to kernel space.
*/
if (intel_pt_tracing_kernel(pt))
pt->br_stack_sz_plus += 1024;
else
pt->br_stack_sz_plus += 1;
}
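/*
 * A thread stack is needed whenever call chains or branch stacks are
 * being synthesized or added to regular events, or thread stack
 * processing was explicitly requested.  Call stack tracking itself is
 * only needed for call chains and thread stacks, not for branch
 * stacks alone.
 */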
pt->use_thread_stack = pt->synth_opts.callchain ||
pt->synth_opts.add_callchain ||
pt->synth_opts.thread_stack ||
pt->synth_opts.last_branch ||
pt->synth_opts.add_last_branch;
pt->callstack = pt->synth_opts.callchain ||
pt->synth_opts.add_callchain ||
pt->synth_opts.thread_stack;
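/*
 * Set up the events that will be synthesized from the trace according
 * to the --itrace options, and note any events that use PEBS via
 * Intel PT so that their samples can also be synthesized.
 */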
err = intel_pt_synth_events(pt, session);
if (err)
goto err_delete_thread;
intel_pt_setup_pebs_events(pt);
if (perf_data__is_pipe(session->data)) {
pr_warning("WARNING: Intel PT with pipe mode is not recommended.\n"
" The output cannot relied upon. In particular,\n"
" timestamps and the order of events may be incorrect.\n");
}
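/*
 * With AUX area sampling, or when there is no auxtrace index to use,
 * queue the trace data by scanning the session data; otherwise
 * populate the queues directly from the index.
 */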
if (pt->sampling_mode || list_empty(&session->auxtrace_index))
err = auxtrace_queue_data(session, true, true);
else
err = auxtrace_queues__process_index(&pt->queues, session);
if (err)
goto err_delete_thread;
if (pt->queues.populated)
pt->data_queued = true;
if (pt->timeless_decoding)
pr_debug2("Intel PT decoding without timestamps\n");
return 0;
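/*
 * Error labels unwind the setup in reverse order: each one frees what
 * was allocated up to the point of failure and falls through to the
 * labels below it.
 */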
err_delete_thread:
zfree(&pt->chain);
thread__zput(pt->unknown_thread);
err_free_queues:
intel_pt_log_disable();
auxtrace_queues__free(&pt->queues);
session->auxtrace = NULL;
err_free:
addr_filters__exit(&pt->filts);
zfree(&pt->filter);
zfree(&pt->time_ranges);
free(pt);
return err;
}