From 06786963020cc42c50f2c95d1ca5cb839460fa0e Mon Sep 17 00:00:00 2001
From: Arnaldo Carvalho de Melo
Date: Mon, 14 Aug 2017 13:53:10 -0300
Subject: [PATCH 01/10] perf tests shell: Remove duplicate skip_if_no_debuginfo() function

Cc: Adrian Hunter
Cc: David Ahern
Cc: Jiri Olsa
Cc: Namhyung Kim
Cc: Wang Nan
Link: http://lkml.kernel.org/n/tip-3zxjswdbs2au3ih0rino0iy1@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/tests/shell/lib/probe_vfs_getname.sh | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 42308040f882..3eaddf190a30 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -21,8 +21,3 @@ skip_if_no_debuginfo() {
 	add_probe_vfs_getname -v 2>&1 | grep -q "^Failed to find the path for kernel" && return 2
 	return 1
 }
-
-skip_if_no_debuginfo() {
-	add_probe_vfs_getname -v 2>&1 | grep -q "^Failed to find the path for kernel" && return 2
-	return 1
-}

From 1ad5a182690c60b7effafec3c1c4512c11e5b545 Mon Sep 17 00:00:00 2001
From: Arnaldo Carvalho de Melo
Date: Mon, 14 Aug 2017 14:26:09 -0300
Subject: [PATCH 02/10] perf test shell: Check if 'perf probe' is available, skip tests if not

Add a library function that checks if 'perf probe' is built into the
tool being tested, skipping tests that need it.

Testing it on a system after removing the library needed to build
'probe' as a perf subcommand:

# perf test ping vfs_getname
59: Use vfs_getname probe to get syscall args filenames : Skip
60: probe libc's inet_pton & backtrace it with ping : Skip
61: Check open filename arg using perf trace + vfs_getname: Skip
62: Add vfs_getname probe to get syscall args filenames : Skip
# perf probe
perf: 'probe' is not a perf-command. See 'perf --help'.
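(For context, the calling pattern the new lib/probe.sh helper enables looks like this -- a minimal sketch distilled from the test scripts changed further down; exit code 2 is what 'perf test' shows as "Skip" above:)

  # Sketch of a perf shell test using the new helper (illustrative only):
  . $(dirname $0)/lib/probe.sh      # provides skip_if_no_perf_probe()

  skip_if_no_perf_probe || exit 2   # 2 is reported by 'perf test' as "Skip"

  # ... the rest of the test, which is free to rely on 'perf probe' ...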
# Now reinstalling elfutils-libelf-devel on this Fedora 26 system to rebuild perf and then retest this: # perf test ping vfs_getname 60: Use vfs_getname probe to get syscall args filenames : Ok 61: probe libc's inet_pton & backtrace it with ping : Ok 62: Check open filename arg using perf trace + vfs_getname: Ok 63: Add vfs_getname probe to get syscall args filenames : Ok # Reported-by: Kim Phillips Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Michael Petlan Cc: Namhyung Kim Cc: Thomas Richter Cc: Wang Nan Link: http://lkml.kernel.org/n/tip-ctdck2gzsskqhjzu3ebb62zm@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/lib/probe.sh | 6 ++++++ tools/perf/tests/shell/lib/probe_vfs_getname.sh | 2 +- tools/perf/tests/shell/probe_vfs_getname.sh | 4 ++++ tools/perf/tests/shell/record+script_probe_vfs_getname.sh | 4 ++++ tools/perf/tests/shell/trace+probe_libc_inet_pton.sh | 3 +++ tools/perf/tests/shell/trace+probe_vfs_getname.sh | 4 ++++ 6 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 tools/perf/tests/shell/lib/probe.sh diff --git a/tools/perf/tests/shell/lib/probe.sh b/tools/perf/tests/shell/lib/probe.sh new file mode 100644 index 000000000000..61da3b2c9bca --- /dev/null +++ b/tools/perf/tests/shell/lib/probe.sh @@ -0,0 +1,6 @@ +# Arnaldo Carvalho de Melo , 2017 + +skip_if_no_perf_probe() { + perf probe |& grep -q 'is not a perf-command' && return 2 + return 0 +} diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh index 3eaddf190a30..46c1bb707600 100644 --- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh +++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh @@ -1,6 +1,6 @@ # Arnaldo Carvalho de Melo , 2017 -perf probe -l | grep -q probe:vfs_getname +perf probe -l |& grep -q probe:vfs_getname had_vfs_getname=$? cleanup_probe_vfs_getname() { diff --git a/tools/perf/tests/shell/probe_vfs_getname.sh b/tools/perf/tests/shell/probe_vfs_getname.sh index c8380137beef..9b7635184dc2 100755 --- a/tools/perf/tests/shell/probe_vfs_getname.sh +++ b/tools/perf/tests/shell/probe_vfs_getname.sh @@ -2,6 +2,10 @@ # # Arnaldo Carvalho de Melo , 2017 +. $(dirname $0)/lib/probe.sh + +skip_if_no_perf_probe || exit 2 + . $(dirname $0)/lib/probe_vfs_getname.sh add_probe_vfs_getname || skip_if_no_debuginfo diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh index 2725c5db699a..ba29535b8580 100755 --- a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh +++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh @@ -7,6 +7,10 @@ # Arnaldo Carvalho de Melo , 2017 +. $(dirname $0)/lib/probe.sh + +skip_if_no_perf_probe || exit 2 + . $(dirname $0)/lib/probe_vfs_getname.sh perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX) diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh index 1b9a276e2ace..99dafad61954 100755 --- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh @@ -8,6 +8,8 @@ # Arnaldo Carvalho de Melo , 2017 +. $(dirname $0)/lib/probe.sh + trace_libc_inet_pton_backtrace() { idx=0 expected[0]="PING.*bytes" @@ -32,6 +34,7 @@ trace_libc_inet_pton_backtrace() { done } +skip_if_no_perf_probe && \ perf probe -q /lib64/libc-*.so inet_pton && \ trace_libc_inet_pton_backtrace err=$? 
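(In the inet_pton test just above, the helper is spliced into an existing '&&' chain instead of being followed by 'exit 2', so the skip status reaches 'err' the same way a probe-setup failure would. A stand-alone illustration of that propagation -- the function below is a stand-in, not perf code:)

  maybe_skip() { return 2; }   # stands in for skip_if_no_perf_probe on a
                               # perf binary built without 'probe' support

  maybe_skip && \
  echo "set up probe and run the traced workload"  # skipped when status != 0
  err=$?

  echo "exit status: $err"     # prints 2, which the harness reports as Skip
  exit $err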
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh index d0fba0c5e354..2e68c5f120da 100755 --- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh +++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh @@ -8,6 +8,10 @@ # Arnaldo Carvalho de Melo , 2017 +. $(dirname $0)/lib/probe.sh + +skip_if_no_perf_probe || exit 2 + . $(dirname $0)/lib/probe_vfs_getname.sh file=$(mktemp /tmp/temporary_file.XXXXX) From 2b728861a63a7ba6073a476244a83f575864273e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 14 Aug 2017 14:40:58 -0300 Subject: [PATCH 03/10] perf test shell vfs_getname: Skip for tools built with NO_LIBDWARF=1 If that is the case, or if the required lib is not present, e.g. elfutils-devel in Fedora systems, then just skip the tests requiring DWARF analysis. Before: # rpm -e elfutils-devel # perf test ping vfs_getname 60: Use vfs_getname probe to get syscall args filenames : FAILED! 61: probe libc's inet_pton & backtrace it with ping : Ok 62: Check open filename arg using perf trace + vfs_getname: FAILED! 63: Add vfs_getname probe to get syscall args filenames : FAILED! # After: # perf test vfs_getname 60: Use vfs_getname probe to get syscall args filenames : Skip 62: Check open filename arg using perf trace + vfs_getname: Skip 63: Add vfs_getname probe to get syscall args filenames : Skip # Then, reinstalling elfutils-devel, rebuilding the tool and running again: # perf test vfs_getname 60: Use vfs_getname probe to get syscall args filenames : Ok 62: Check open filename arg using perf trace + vfs_getname: Ok 63: Add vfs_getname probe to get syscall args filenames : Ok # Reported-by: Kim Phillips Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Michael Petlan Cc: Namhyung Kim Cc: Thomas Richter Cc: Wang Nan Link: http://lkml.kernel.org/n/tip-d67tvn401fxrwr97pu5ihfb1@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/lib/probe_vfs_getname.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh index 46c1bb707600..f832395daad7 100644 --- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh +++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh @@ -12,12 +12,12 @@ cleanup_probe_vfs_getname() { add_probe_vfs_getname() { local verbose=$1 if [ $had_vfs_getname -eq 1 ] ; then - line=$(perf probe -L getname_flags | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/') - perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string" + line=$(perf probe -L getname_flags |& egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/') + perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string" fi } skip_if_no_debuginfo() { - add_probe_vfs_getname -v 2>&1 | grep -q "^Failed to find the path for kernel" && return 2 + add_probe_vfs_getname -v 2>&1 | egrep -q "^(Failed to find the path for kernel|Debuginfo-analysis is not supported)" && return 2 return 1 } From c8a827285c33e7a19e81dfbff2740e1e67ca42df Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 3 Aug 2017 11:31:26 +0300 Subject: [PATCH 04/10] perf scripts python: Fix missing call_path_id in export-to-postgresql script The export does not work if only branches are exported because of a missing column in the samples table. Fix by adding the missing call_path_id. 
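(One way to sanity-check this fix after re-exporting a branches-only database -- illustrative; 'pt_example' is the example database name these scripts use, and psql access is assumed:)

  # Re-export with branches only, then confirm the column is present:
  perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py \
          pt_example branches calls
  psql pt_example -c '\d samples' | grep call_path_id   # should print one row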
Fixes: 3521f3bc9dae ("perf script: Update export-to-postgresql to support callchain export") Signed-off-by: Adrian Hunter Link: http://lkml.kernel.org/r/1501749090-20357-2-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/export-to-postgresql.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index 7656ff8aa066..f57811443beb 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py @@ -340,7 +340,8 @@ if branches: 'to_sym_offset bigint,' 'to_ip bigint,' 'branch_type integer,' - 'in_tx boolean)') + 'in_tx boolean,' + 'call_path_id bigint)') else: do_query(query, 'CREATE TABLE samples (' 'id bigint NOT NULL,' From 2295e9f850b0efbc57c81fccdd8bd8d26fe10029 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 3 Aug 2017 11:31:27 +0300 Subject: [PATCH 05/10] perf scripts python: Fix query in call-graph-from-postgresql.py Add a missing space which seemed not to affect PostgreSQL but upsets SQLite. Signed-off-by: Adrian Hunter Link: http://lkml.kernel.org/r/1501749090-20357-3-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/call-graph-from-postgresql.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/scripts/python/call-graph-from-postgresql.py b/tools/perf/scripts/python/call-graph-from-postgresql.py index e78fdc2a5a9d..ed9f7f3ccf22 100644 --- a/tools/perf/scripts/python/call-graph-from-postgresql.py +++ b/tools/perf/scripts/python/call-graph-from-postgresql.py @@ -160,7 +160,7 @@ class TreeItem(): '( SELECT short_name FROM dsos WHERE id = ( SELECT dso_id FROM symbols WHERE id = ( SELECT symbol_id FROM call_paths WHERE id = call_path_id ) ) ), ' '( SELECT ip FROM call_paths where id = call_path_id ) ' 'FROM calls WHERE parent_call_path_id = ' + str(self.call_path_id) + ' AND comm_id = ' + str(self.comm_id) + ' AND thread_id = ' + str(self.thread_id) + - 'ORDER BY call_path_id') + ' ORDER BY call_path_id') if not ret: raise Exception("Query failed: " + query.lastError().text()) last_call_path_id = 0 From 564b9527d1ccf5d581275391e39ac4b1f29f0d08 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 3 Aug 2017 11:31:28 +0300 Subject: [PATCH 06/10] perf script python: Add support for exporting to sqlite3 Add support for exporting to SQLite 3 the same data as the PostgreSQL export. Committer note: Tested on RHEL 7.4 using the 1.2.2-4el python-pyside packages from EPEL. 
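(Going back to the one-character fix in call-graph-from-postgresql.py above: the effect of the missing space is easier to see with the concatenated query text written out -- a throwaway illustration, where '5' stands in for the interpolated thread_id:)

  old="... AND thread_id = 5""ORDER BY call_path_id"
  new="... AND thread_id = 5"" ORDER BY call_path_id"
  printf '%s\n' "$old" "$new"
  # ... AND thread_id = 5ORDER BY call_path_id    <- what upset SQLite
  # ... AND thread_id = 5 ORDER BY call_path_id   <- valid SQL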
Signed-off-by: Adrian Hunter Tested-by: Arnaldo Carvalho de Melo Link: http://lkml.kernel.org/r/1501749090-20357-4-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/intel-pt.txt | 6 +- .../python/bin/export-to-sqlite-record | 8 + .../python/bin/export-to-sqlite-report | 29 ++ tools/perf/scripts/python/export-to-sqlite.py | 451 ++++++++++++++++++ 4 files changed, 491 insertions(+), 3 deletions(-) create mode 100644 tools/perf/scripts/python/bin/export-to-sqlite-record create mode 100644 tools/perf/scripts/python/bin/export-to-sqlite-report create mode 100644 tools/perf/scripts/python/export-to-sqlite.py diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt index 4b6cdbf8f935..8e8ae3ad4cbf 100644 --- a/tools/perf/Documentation/intel-pt.txt +++ b/tools/perf/Documentation/intel-pt.txt @@ -104,9 +104,9 @@ system, asynchronous, interrupt, transaction abort, trace begin, trace end, and in transaction, respectively. While it is possible to create scripts to analyze the data, an alternative -approach is available to export the data to a postgresql database. Refer to -script export-to-postgresql.py for more details, and to script -call-graph-from-postgresql.py for an example of using the database. +approach is available to export the data to a sqlite or postgresql database. +Refer to script export-to-sqlite.py or export-to-postgresql.py for more details, +and to script call-graph-from-postgresql.py for an example of using the database. There is also script intel-pt-events.py which provides an example of how to unpack the raw data for power events and PTWRITE. diff --git a/tools/perf/scripts/python/bin/export-to-sqlite-record b/tools/perf/scripts/python/bin/export-to-sqlite-record new file mode 100644 index 000000000000..070204fd6d00 --- /dev/null +++ b/tools/perf/scripts/python/bin/export-to-sqlite-record @@ -0,0 +1,8 @@ +#!/bin/bash + +# +# export perf data to a sqlite3 database. Can cover +# perf ip samples (excluding the tracepoints). No special +# record requirements, just record what you want to export. +# +perf record $@ diff --git a/tools/perf/scripts/python/bin/export-to-sqlite-report b/tools/perf/scripts/python/bin/export-to-sqlite-report new file mode 100644 index 000000000000..5ff6033e70ba --- /dev/null +++ b/tools/perf/scripts/python/bin/export-to-sqlite-report @@ -0,0 +1,29 @@ +#!/bin/bash +# description: export perf data to a sqlite3 database +# args: [database name] [columns] [calls] +n_args=0 +for i in "$@" +do + if expr match "$i" "-" > /dev/null ; then + break + fi + n_args=$(( $n_args + 1 )) +done +if [ "$n_args" -gt 3 ] ; then + echo "usage: export-to-sqlite-report [database name] [columns] [calls]" + exit +fi +if [ "$n_args" -gt 2 ] ; then + dbname=$1 + columns=$2 + calls=$3 + shift 3 +elif [ "$n_args" -gt 1 ] ; then + dbname=$1 + columns=$2 + shift 2 +elif [ "$n_args" -gt 0 ] ; then + dbname=$1 + shift +fi +perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/export-to-sqlite.py $dbname $columns $calls diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py new file mode 100644 index 000000000000..f827bf77e9d2 --- /dev/null +++ b/tools/perf/scripts/python/export-to-sqlite.py @@ -0,0 +1,451 @@ +# export-to-sqlite.py: export perf data to a sqlite3 database +# Copyright (c) 2017, Intel Corporation. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. + +import os +import sys +import struct +import datetime + +# To use this script you will need to have installed package python-pyside which +# provides LGPL-licensed Python bindings for Qt. You will also need the package +# libqt4-sql-sqlite for Qt sqlite3 support. +# +# An example of using this script with Intel PT: +# +# $ perf record -e intel_pt//u ls +# $ perf script -s ~/libexec/perf-core/scripts/python/export-to-sqlite.py pt_example branches calls +# 2017-07-31 14:26:07.326913 Creating database... +# 2017-07-31 14:26:07.538097 Writing records... +# 2017-07-31 14:26:09.889292 Adding indexes +# 2017-07-31 14:26:09.958746 Done +# +# To browse the database, sqlite3 can be used e.g. +# +# $ sqlite3 pt_example +# sqlite> .header on +# sqlite> select * from samples_view where id < 10; +# sqlite> .mode column +# sqlite> select * from samples_view where id < 10; +# sqlite> .tables +# sqlite> .schema samples_view +# sqlite> .quit +# +# An example of using the database is provided by the script +# call-graph-from-sql.py. Refer to that script for details. +# +# The database structure is practically the same as created by the script +# export-to-postgresql.py. Refer to that script for details. A notable +# difference is the 'transaction' column of the 'samples' table which is +# renamed 'transaction_' in sqlite because 'transaction' is a reserved word. + +from PySide.QtSql import * + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +# These perf imports are not used at present +#from perf_trace_context import * +#from Core import * + +perf_db_export_mode = True +perf_db_export_calls = False +perf_db_export_callchains = False + +def usage(): + print >> sys.stderr, "Usage is: export-to-sqlite.py [] [] []" + print >> sys.stderr, "where: columns 'all' or 'branches'" + print >> sys.stderr, " calls 'calls' => create calls and call_paths table" + print >> sys.stderr, " callchains 'callchains' => create call_paths table" + raise Exception("Too few arguments") + +if (len(sys.argv) < 2): + usage() + +dbname = sys.argv[1] + +if (len(sys.argv) >= 3): + columns = sys.argv[2] +else: + columns = "all" + +if columns not in ("all", "branches"): + usage() + +branches = (columns == "branches") + +for i in range(3,len(sys.argv)): + if (sys.argv[i] == "calls"): + perf_db_export_calls = True + elif (sys.argv[i] == "callchains"): + perf_db_export_callchains = True + else: + usage() + +def do_query(q, s): + if (q.exec_(s)): + return + raise Exception("Query failed: " + q.lastError().text()) + +def do_query_(q): + if (q.exec_()): + return + raise Exception("Query failed: " + q.lastError().text()) + +print datetime.datetime.today(), "Creating database..." 
+ +db_exists = False +try: + f = open(dbname) + f.close() + db_exists = True +except: + pass + +if db_exists: + raise Exception(dbname + " already exists") + +db = QSqlDatabase.addDatabase('QSQLITE') +db.setDatabaseName(dbname) +db.open() + +query = QSqlQuery(db) + +do_query(query, 'PRAGMA journal_mode = OFF') +do_query(query, 'BEGIN TRANSACTION') + +do_query(query, 'CREATE TABLE selected_events (' + 'id integer NOT NULL PRIMARY KEY,' + 'name varchar(80))') +do_query(query, 'CREATE TABLE machines (' + 'id integer NOT NULL PRIMARY KEY,' + 'pid integer,' + 'root_dir varchar(4096))') +do_query(query, 'CREATE TABLE threads (' + 'id integer NOT NULL PRIMARY KEY,' + 'machine_id bigint,' + 'process_id bigint,' + 'pid integer,' + 'tid integer)') +do_query(query, 'CREATE TABLE comms (' + 'id integer NOT NULL PRIMARY KEY,' + 'comm varchar(16))') +do_query(query, 'CREATE TABLE comm_threads (' + 'id integer NOT NULL PRIMARY KEY,' + 'comm_id bigint,' + 'thread_id bigint)') +do_query(query, 'CREATE TABLE dsos (' + 'id integer NOT NULL PRIMARY KEY,' + 'machine_id bigint,' + 'short_name varchar(256),' + 'long_name varchar(4096),' + 'build_id varchar(64))') +do_query(query, 'CREATE TABLE symbols (' + 'id integer NOT NULL PRIMARY KEY,' + 'dso_id bigint,' + 'sym_start bigint,' + 'sym_end bigint,' + 'binding integer,' + 'name varchar(2048))') +do_query(query, 'CREATE TABLE branch_types (' + 'id integer NOT NULL PRIMARY KEY,' + 'name varchar(80))') + +if branches: + do_query(query, 'CREATE TABLE samples (' + 'id integer NOT NULL PRIMARY KEY,' + 'evsel_id bigint,' + 'machine_id bigint,' + 'thread_id bigint,' + 'comm_id bigint,' + 'dso_id bigint,' + 'symbol_id bigint,' + 'sym_offset bigint,' + 'ip bigint,' + 'time bigint,' + 'cpu integer,' + 'to_dso_id bigint,' + 'to_symbol_id bigint,' + 'to_sym_offset bigint,' + 'to_ip bigint,' + 'branch_type integer,' + 'in_tx boolean,' + 'call_path_id bigint)') +else: + do_query(query, 'CREATE TABLE samples (' + 'id integer NOT NULL PRIMARY KEY,' + 'evsel_id bigint,' + 'machine_id bigint,' + 'thread_id bigint,' + 'comm_id bigint,' + 'dso_id bigint,' + 'symbol_id bigint,' + 'sym_offset bigint,' + 'ip bigint,' + 'time bigint,' + 'cpu integer,' + 'to_dso_id bigint,' + 'to_symbol_id bigint,' + 'to_sym_offset bigint,' + 'to_ip bigint,' + 'period bigint,' + 'weight bigint,' + 'transaction_ bigint,' + 'data_src bigint,' + 'branch_type integer,' + 'in_tx boolean,' + 'call_path_id bigint)') + +if perf_db_export_calls or perf_db_export_callchains: + do_query(query, 'CREATE TABLE call_paths (' + 'id integer NOT NULL PRIMARY KEY,' + 'parent_id bigint,' + 'symbol_id bigint,' + 'ip bigint)') +if perf_db_export_calls: + do_query(query, 'CREATE TABLE calls (' + 'id integer NOT NULL PRIMARY KEY,' + 'thread_id bigint,' + 'comm_id bigint,' + 'call_path_id bigint,' + 'call_time bigint,' + 'return_time bigint,' + 'branch_count bigint,' + 'call_id bigint,' + 'return_id bigint,' + 'parent_call_path_id bigint,' + 'flags integer)') + +# printf was added to sqlite in version 3.8.3 +sqlite_has_printf = False +try: + do_query(query, 'SELECT printf("") FROM machines') + sqlite_has_printf = True +except: + pass + +def emit_to_hex(x): + if sqlite_has_printf: + return 'printf("%x", ' + x + ')' + else: + return x + +do_query(query, 'CREATE VIEW machines_view AS ' + 'SELECT ' + 'id,' + 'pid,' + 'root_dir,' + 'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest' + ' FROM machines') + +do_query(query, 'CREATE VIEW dsos_view AS ' + 'SELECT ' + 'id,' + 'machine_id,' + 
'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' + 'short_name,' + 'long_name,' + 'build_id' + ' FROM dsos') + +do_query(query, 'CREATE VIEW symbols_view AS ' + 'SELECT ' + 'id,' + 'name,' + '(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,' + 'dso_id,' + 'sym_start,' + 'sym_end,' + 'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding' + ' FROM symbols') + +do_query(query, 'CREATE VIEW threads_view AS ' + 'SELECT ' + 'id,' + 'machine_id,' + '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' + 'process_id,' + 'pid,' + 'tid' + ' FROM threads') + +do_query(query, 'CREATE VIEW comm_threads_view AS ' + 'SELECT ' + 'comm_id,' + '(SELECT comm FROM comms WHERE id = comm_id) AS command,' + 'thread_id,' + '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' + '(SELECT tid FROM threads WHERE id = thread_id) AS tid' + ' FROM comm_threads') + +if perf_db_export_calls or perf_db_export_callchains: + do_query(query, 'CREATE VIEW call_paths_view AS ' + 'SELECT ' + 'c.id,' + + emit_to_hex('c.ip') + ' AS ip,' + 'c.symbol_id,' + '(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,' + '(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,' + '(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,' + 'c.parent_id,' + + emit_to_hex('p.ip') + ' AS parent_ip,' + 'p.symbol_id AS parent_symbol_id,' + '(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,' + '(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,' + '(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name' + ' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id') +if perf_db_export_calls: + do_query(query, 'CREATE VIEW calls_view AS ' + 'SELECT ' + 'calls.id,' + 'thread_id,' + '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' + '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' + '(SELECT comm FROM comms WHERE id = comm_id) AS command,' + 'call_path_id,' + + emit_to_hex('ip') + ' AS ip,' + 'symbol_id,' + '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' + 'call_time,' + 'return_time,' + 'return_time - call_time AS elapsed_time,' + 'branch_count,' + 'call_id,' + 'return_id,' + 'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,' + 'parent_call_path_id' + ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') + +do_query(query, 'CREATE VIEW samples_view AS ' + 'SELECT ' + 'id,' + 'time,' + 'cpu,' + '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' + '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' + '(SELECT comm FROM comms WHERE id = comm_id) AS command,' + '(SELECT name FROM selected_events WHERE id = evsel_id) AS event,' + + emit_to_hex('ip') + ' AS ip_hex,' + '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' + 'sym_offset,' + '(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,' + + emit_to_hex('to_ip') + ' AS to_ip_hex,' + '(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,' + 'to_sym_offset,' + '(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,' + '(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,' + 'in_tx' + ' FROM samples') + +do_query(query, 'END TRANSACTION') + +evsel_query = QSqlQuery(db) +evsel_query.prepare("INSERT INTO selected_events VALUES (?, ?)") +machine_query = QSqlQuery(db) 
+machine_query.prepare("INSERT INTO machines VALUES (?, ?, ?)") +thread_query = QSqlQuery(db) +thread_query.prepare("INSERT INTO threads VALUES (?, ?, ?, ?, ?)") +comm_query = QSqlQuery(db) +comm_query.prepare("INSERT INTO comms VALUES (?, ?)") +comm_thread_query = QSqlQuery(db) +comm_thread_query.prepare("INSERT INTO comm_threads VALUES (?, ?, ?)") +dso_query = QSqlQuery(db) +dso_query.prepare("INSERT INTO dsos VALUES (?, ?, ?, ?, ?)") +symbol_query = QSqlQuery(db) +symbol_query.prepare("INSERT INTO symbols VALUES (?, ?, ?, ?, ?, ?)") +branch_type_query = QSqlQuery(db) +branch_type_query.prepare("INSERT INTO branch_types VALUES (?, ?)") +sample_query = QSqlQuery(db) +if branches: + sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") +else: + sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") +if perf_db_export_calls or perf_db_export_callchains: + call_path_query = QSqlQuery(db) + call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)") +if perf_db_export_calls: + call_query = QSqlQuery(db) + call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") + +def trace_begin(): + print datetime.datetime.today(), "Writing records..." + do_query(query, 'BEGIN TRANSACTION') + # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs + evsel_table(0, "unknown") + machine_table(0, 0, "unknown") + thread_table(0, 0, 0, -1, -1) + comm_table(0, "unknown") + dso_table(0, 0, "unknown", "unknown", "") + symbol_table(0, 0, 0, 0, 0, "unknown") + sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + if perf_db_export_calls or perf_db_export_callchains: + call_path_table(0, 0, 0, 0) + +unhandled_count = 0 + +def trace_end(): + do_query(query, 'END TRANSACTION') + + print datetime.datetime.today(), "Adding indexes" + if perf_db_export_calls: + do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') + + if (unhandled_count): + print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events" + print datetime.datetime.today(), "Done" + +def trace_unhandled(event_name, context, event_fields_dict): + global unhandled_count + unhandled_count += 1 + +def sched__sched_switch(*x): + pass + +def bind_exec(q, n, x): + for xx in x[0:n]: + q.addBindValue(str(xx)) + do_query_(q) + +def evsel_table(*x): + bind_exec(evsel_query, 2, x) + +def machine_table(*x): + bind_exec(machine_query, 3, x) + +def thread_table(*x): + bind_exec(thread_query, 5, x) + +def comm_table(*x): + bind_exec(comm_query, 2, x) + +def comm_thread_table(*x): + bind_exec(comm_thread_query, 3, x) + +def dso_table(*x): + bind_exec(dso_query, 5, x) + +def symbol_table(*x): + bind_exec(symbol_query, 6, x) + +def branch_type_table(*x): + bind_exec(branch_type_query, 2, x) + +def sample_table(*x): + if branches: + bind_exec(sample_query, 18, x) + else: + bind_exec(sample_query, 22, x) + +def call_path_table(*x): + bind_exec(call_path_query, 4, x) + +def call_return_table(*x): + bind_exec(call_query, 11, x) From 69e6e410f1a1e69cb656e8ebddaae0ba2138b235 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 3 Aug 2017 11:31:29 +0300 Subject: [PATCH 07/10] perf script python: Rename call-graph-from-postgresql.py to call-graph-from-sql.py Rename call-graph-from-postgresql.py to call-graph-from-sql.py in preparation for adding support to it for SQLite 3. 
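(Before the viewer changes, an end-to-end run of the sqlite exporter added above, following the usage notes embedded in export-to-sqlite.py; the libexec path comes from those notes, and the calls_view query is an extra illustration:)

  # Record something with Intel PT, export it, then query the database:
  perf record -e intel_pt//u ls
  perf script -s ~/libexec/perf-core/scripts/python/export-to-sqlite.py \
          pt_example branches calls
  # Browse the result with the stock sqlite3 shell:
  sqlite3 pt_example << 'EOF'
  .header on
  .mode column
  SELECT * FROM samples_view WHERE id < 10;
  SELECT command, symbol, elapsed_time FROM calls_view LIMIT 10;
  EOF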
Signed-off-by: Adrian Hunter Link: http://lkml.kernel.org/r/1501749090-20357-5-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/intel-pt.txt | 2 +- ...graph-from-postgresql.py => call-graph-from-sql.py} | 10 +++++----- tools/perf/scripts/python/export-to-postgresql.py | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) rename tools/perf/scripts/python/{call-graph-from-postgresql.py => call-graph-from-sql.py} (96%) diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt index 8e8ae3ad4cbf..ab1b0825130a 100644 --- a/tools/perf/Documentation/intel-pt.txt +++ b/tools/perf/Documentation/intel-pt.txt @@ -106,7 +106,7 @@ in transaction, respectively. While it is possible to create scripts to analyze the data, an alternative approach is available to export the data to a sqlite or postgresql database. Refer to script export-to-sqlite.py or export-to-postgresql.py for more details, -and to script call-graph-from-postgresql.py for an example of using the database. +and to script call-graph-from-sql.py for an example of using the database. There is also script intel-pt-events.py which provides an example of how to unpack the raw data for power events and PTWRITE. diff --git a/tools/perf/scripts/python/call-graph-from-postgresql.py b/tools/perf/scripts/python/call-graph-from-sql.py similarity index 96% rename from tools/perf/scripts/python/call-graph-from-postgresql.py rename to tools/perf/scripts/python/call-graph-from-sql.py index ed9f7f3ccf22..f18406d3faf7 100644 --- a/tools/perf/scripts/python/call-graph-from-postgresql.py +++ b/tools/perf/scripts/python/call-graph-from-sql.py @@ -1,6 +1,6 @@ #!/usr/bin/python2 -# call-graph-from-postgresql.py: create call-graph from postgresql database -# Copyright (c) 2014, Intel Corporation. +# call-graph-from-sql.py: create call-graph from postgresql database +# Copyright (c) 2014-2017, Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -17,12 +17,12 @@ # Following on from the example in the export-to-postgresql.py script, a # call-graph can be displayed for the pt_example database like this: # -# python tools/perf/scripts/python/call-graph-from-postgresql.py pt_example +# python tools/perf/scripts/python/call-graph-from-sql.py pt_example # # Note this script supports connecting to remote databases by setting hostname, # port, username, password, and dbname e.g. # -# python tools/perf/scripts/python/call-graph-from-postgresql.py "hostname=myhost username=myuser password=mypassword dbname=pt_example" +# python tools/perf/scripts/python/call-graph-from-sql.py "hostname=myhost username=myuser password=mypassword dbname=pt_example" # # The result is a GUI window with a tree representing a context-sensitive # call-graph. 
Expanding a couple of levels of the tree and adjusting column @@ -291,7 +291,7 @@ class MainWindow(QMainWindow): if __name__ == '__main__': if (len(sys.argv) < 2): - print >> sys.stderr, "Usage is: call-graph-from-postgresql.py " + print >> sys.stderr, "Usage is: call-graph-from-sql.py " raise Exception("Too few arguments") dbname = sys.argv[1] diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index f57811443beb..efcaf6cac2eb 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py @@ -59,7 +59,7 @@ import datetime # pt_example=# \q # # An example of using the database is provided by the script -# call-graph-from-postgresql.py. Refer to that script for details. +# call-graph-from-sql.py. Refer to that script for details. # # Tables: # From 1fe03b5f2d70b48c4a10785edf2678ff05506e59 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 3 Aug 2017 11:31:30 +0300 Subject: [PATCH 08/10] perf script python: Add support for sqlite3 to call-graph-from-sql.py Add support for SQLite 3 to the call-graph-from-sql.py script. The SQL statements work as is, so just detect the database type by checking if the SQLite 3 file exists. Committer notes: Tested collecting the PT data on a RHEL7.4, generating the SQLite3 database there and then moving it to a Fedora 26 system where the call-graph-from-sql.py script was run, using python-pyside version 1.2.2-7fc26 to see the callgraphs using Qt4. Signed-off-by: Adrian Hunter Tested-by: Arnaldo Carvalho de Melo Link: http://lkml.kernel.org/r/1501749090-20357-6-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- .../scripts/python/call-graph-from-sql.py | 58 +++++++++++-------- 1 file changed, 35 insertions(+), 23 deletions(-) diff --git a/tools/perf/scripts/python/call-graph-from-sql.py b/tools/perf/scripts/python/call-graph-from-sql.py index f18406d3faf7..b494a67a1c67 100644 --- a/tools/perf/scripts/python/call-graph-from-sql.py +++ b/tools/perf/scripts/python/call-graph-from-sql.py @@ -1,5 +1,5 @@ #!/usr/bin/python2 -# call-graph-from-sql.py: create call-graph from postgresql database +# call-graph-from-sql.py: create call-graph from sql database # Copyright (c) 2014-2017, Intel Corporation. # # This program is free software; you can redistribute it and/or modify it @@ -11,16 +11,17 @@ # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. -# To use this script you will need to have exported data using the -# export-to-postgresql.py script. Refer to that script for details. +# To use this script you will need to have exported data using either the +# export-to-sqlite.py or the export-to-postgresql.py script. Refer to those +# scripts for details. # -# Following on from the example in the export-to-postgresql.py script, a +# Following on from the example in the export scripts, a # call-graph can be displayed for the pt_example database like this: # # python tools/perf/scripts/python/call-graph-from-sql.py pt_example # -# Note this script supports connecting to remote databases by setting hostname, -# port, username, password, and dbname e.g. +# Note that for PostgreSQL, this script supports connecting to remote databases +# by setting hostname, port, username, password, and dbname e.g. 
# # python tools/perf/scripts/python/call-graph-from-sql.py "hostname=myhost username=myuser password=mypassword dbname=pt_example" # @@ -296,24 +297,35 @@ if __name__ == '__main__': dbname = sys.argv[1] - db = QSqlDatabase.addDatabase('QPSQL') + is_sqlite3 = False + try: + f = open(dbname) + if f.read(15) == "SQLite format 3": + is_sqlite3 = True + f.close() + except: + pass - opts = dbname.split() - for opt in opts: - if '=' in opt: - opt = opt.split('=') - if opt[0] == 'hostname': - db.setHostName(opt[1]) - elif opt[0] == 'port': - db.setPort(int(opt[1])) - elif opt[0] == 'username': - db.setUserName(opt[1]) - elif opt[0] == 'password': - db.setPassword(opt[1]) - elif opt[0] == 'dbname': - dbname = opt[1] - else: - dbname = opt + if is_sqlite3: + db = QSqlDatabase.addDatabase('QSQLITE') + else: + db = QSqlDatabase.addDatabase('QPSQL') + opts = dbname.split() + for opt in opts: + if '=' in opt: + opt = opt.split('=') + if opt[0] == 'hostname': + db.setHostName(opt[1]) + elif opt[0] == 'port': + db.setPort(int(opt[1])) + elif opt[0] == 'username': + db.setUserName(opt[1]) + elif opt[0] == 'password': + db.setPassword(opt[1]) + elif opt[0] == 'dbname': + dbname = opt[1] + else: + dbname = opt db.setDatabaseName(dbname) if not db.open(): From db26984a363e8b8e35783c402978e8acdf9041a5 Mon Sep 17 00:00:00 2001 From: Wang Nan Date: Tue, 15 Aug 2017 11:21:59 +0200 Subject: [PATCH 09/10] perf bpf: Fix endianness problem when loading parameters in prologue Perf's BPF prologue generator unconditionally fetches 8 bytes for function parameters, which causes problems on big endian machines. Thomas gives a detailed analysis for this problem: http://lkml.kernel.org/r/968ebda5-abe4-8830-8d69-49f62529d151@linux.vnet.ibm.com ---- 8< ---- I investigated perf test BPF for s390x and have a question regarding the 38.3 subtest (bpf-prologue test) which fails on s390x. When I turn on trace_printk in tests/bpf-script-test-prologue.c I see this output in /sys/kernel/debug/tracing/trace: [root@s8360047 perf]# cat /sys/kernel/debug/tracing/trace perf-30229 [000] d..2 170161.535791: : f_mode 2001d00000000 offset:0 orig:0 perf-30229 [000] d..2 170161.535809: : f_mode 6001f00000000 offset:0 orig:0 perf-30229 [000] d..2 170161.535815: : f_mode 6001f00000000 offset:1 orig:0 perf-30229 [000] d..2 170161.535819: : f_mode 2001d00000000 offset:1 orig:0 perf-30229 [000] d..2 170161.535822: : f_mode 2001d00000000 offset:2 orig:1 perf-30229 [000] d..2 170161.535825: : f_mode 6001f00000000 offset:2 orig:1 perf-30229 [000] d..2 170161.535828: : f_mode 6001f00000000 offset:3 orig:1 perf-30229 [000] d..2 170161.535832: : f_mode 2001d00000000 offset:3 orig:1 perf-30229 [000] d..2 170161.535835: : f_mode 2001d00000000 offset:4 orig:0 perf-30229 [000] d..2 170161.535841: : f_mode 6001f00000000 offset:4 orig:0 [...] There are 3 parameters the eBPF program tests/bpf-script-test-prologue.c accesses: f_mode (member of struct file at offset 140) offset and orig. They are parameters of the lseek() system call triggered in this test case in function llseek_loop(). What is really strange is the value of f_mode. It is an 8 byte value, whereas in the probe event it is defined as a 4 byte value. The lower 4 bytes are all zero and do not belong to member f_mode. The correct value should be 2001d for read-only and 6001f for read-write open mode. Here is the output of the 'perf test -vv bpf' trace: Try to find probe point from debuginfo. Matched function: null_lseek [2d9310d] Probe point found: null_lseek+0 Searching 'file' variable in context. 
Converting variable file into trace event. converting f_mode in file f_mode type is unsigned int. Opening /sys/kernel/debug/tracing//README write=0 Searching 'offset' variable in context. Converting variable offset into trace event. offset type is long long int. Searching 'orig' variable in context. Converting variable orig into trace event. orig type is int. Found 1 probe_trace_events. Opening /sys/kernel/debug/tracing//kprobe_events write=1 Writing event: p:perf_bpf_probe/func _text+8794224 f_mode=+140(%r2):x32 ---- 8< ---- This patch parses the type of each argument and converts data from memory to expected type. Now the test runs successfully on 4.13.0-rc5: [root@s8360046 perf]# ./perf test bpf 38: BPF filter : 38.1: Basic BPF filtering : Ok 38.2: BPF pinning : Ok 38.3: BPF prologue generation : Ok 38.4: BPF relocation checker : Ok [root@s8360046 perf]# Signed-off-by: Wang Nan Cc: Hendrik Brueckner Link: http://lkml.kernel.org/r/20170815092159.31912-1-tmricht@linux.vnet.ibm.com Signed-off-by: Thomas-Mich Richter Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/bpf-script-test-prologue.c | 4 +- tools/perf/util/bpf-prologue.c | 49 ++++++++++++++++++++- 2 files changed, 50 insertions(+), 3 deletions(-) diff --git a/tools/perf/tests/bpf-script-test-prologue.c b/tools/perf/tests/bpf-script-test-prologue.c index b4ebc75e25ae..43f1e16486f4 100644 --- a/tools/perf/tests/bpf-script-test-prologue.c +++ b/tools/perf/tests/bpf-script-test-prologue.c @@ -26,9 +26,11 @@ static void (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) = (void *) 6; SEC("func=null_lseek file->f_mode offset orig") -int bpf_func__null_lseek(void *ctx, int err, unsigned long f_mode, +int bpf_func__null_lseek(void *ctx, int err, unsigned long _f_mode, unsigned long offset, unsigned long orig) { + fmode_t f_mode = (fmode_t)_f_mode; + if (err) return 0; if (f_mode & FMODE_WRITE) diff --git a/tools/perf/util/bpf-prologue.c b/tools/perf/util/bpf-prologue.c index 1356220a9f1b..827f9140f3b8 100644 --- a/tools/perf/util/bpf-prologue.c +++ b/tools/perf/util/bpf-prologue.c @@ -58,6 +58,46 @@ check_pos(struct bpf_insn_pos *pos) return 0; } +/* + * Convert type string (u8/u16/u32/u64/s8/s16/s32/s64 ..., see + * Documentation/trace/kprobetrace.txt) to size field of BPF_LDX_MEM + * instruction (BPF_{B,H,W,DW}). + */ +static int +argtype_to_ldx_size(const char *type) +{ + int arg_size = type ? atoi(&type[1]) : 64; + + switch (arg_size) { + case 8: + return BPF_B; + case 16: + return BPF_H; + case 32: + return BPF_W; + case 64: + default: + return BPF_DW; + } +} + +static const char * +insn_sz_to_str(int insn_sz) +{ + switch (insn_sz) { + case BPF_B: + return "BPF_B"; + case BPF_H: + return "BPF_H"; + case BPF_W: + return "BPF_W"; + case BPF_DW: + return "BPF_DW"; + default: + return "UNKNOWN"; + } +} + /* Give it a shorter name */ #define ins(i, p) append_insn((i), (p)) @@ -258,9 +298,14 @@ gen_prologue_slowpath(struct bpf_insn_pos *pos, } /* Final pass: read to registers */ - for (i = 0; i < nargs; i++) - ins(BPF_LDX_MEM(BPF_DW, BPF_PROLOGUE_START_ARG_REG + i, + for (i = 0; i < nargs; i++) { + int insn_sz = (args[i].ref) ? 
argtype_to_ldx_size(args[i].type) : BPF_DW; + + pr_debug("prologue: load arg %d, insn_sz is %s\n", + i, insn_sz_to_str(insn_sz)); + ins(BPF_LDX_MEM(insn_sz, BPF_PROLOGUE_START_ARG_REG + i, BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos); + } ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos); From 35435cd06081d7db96bc617b65ba556f8e24340e Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Wed, 16 Aug 2017 16:20:40 -0300 Subject: [PATCH 10/10] perf test shell: Replace '|&' with '2>&1 |' to work with more shells Since we do not specify bash (and/or zsh) as a requirement, use the standard error redirection that is more widely supported. Signed-off-by: Kim Phillips Link: http://lkml.kernel.org/n/tip-ji5mhn3iilgch3eaay6csr6z@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/lib/probe.sh | 2 +- tools/perf/tests/shell/lib/probe_vfs_getname.sh | 4 ++-- tools/perf/tests/shell/trace+probe_libc_inet_pton.sh | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/perf/tests/shell/lib/probe.sh b/tools/perf/tests/shell/lib/probe.sh index 61da3b2c9bca..6293cc660947 100644 --- a/tools/perf/tests/shell/lib/probe.sh +++ b/tools/perf/tests/shell/lib/probe.sh @@ -1,6 +1,6 @@ # Arnaldo Carvalho de Melo , 2017 skip_if_no_perf_probe() { - perf probe |& grep -q 'is not a perf-command' && return 2 + perf probe 2>&1 | grep -q 'is not a perf-command' && return 2 return 0 } diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh index f832395daad7..30a950c9d407 100644 --- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh +++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh @@ -1,6 +1,6 @@ # Arnaldo Carvalho de Melo , 2017 -perf probe -l |& grep -q probe:vfs_getname +perf probe -l 2>&1 | grep -q probe:vfs_getname had_vfs_getname=$? cleanup_probe_vfs_getname() { @@ -12,7 +12,7 @@ cleanup_probe_vfs_getname() { add_probe_vfs_getname() { local verbose=$1 if [ $had_vfs_getname -eq 1 ] ; then - line=$(perf probe -L getname_flags |& egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/') + line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/') perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string" fi } diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh index 99dafad61954..462fc755092e 100755 --- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh @@ -22,7 +22,7 @@ trace_libc_inet_pton_backtrace() { expected[7]="getaddrinfo[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$" expected[8]=".*\(.*/bin/ping.*\)$" - perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 |& grep -v ^$ | while read line ; do + perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do echo $line echo "$line" | egrep -q "${expected[$idx]}" if [ $? -ne 0 ] ; then
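(A closing note on the substitution in patch 10: in bash and zsh, '|&' is shorthand for '2>&1 |', i.e. it pipes stderr along with stdout; spelling it out keeps the tests working under a plain POSIX /bin/sh, and avoids ksh, where '|&' starts a co-process instead. The two forms side by side, as used in lib/probe.sh:)

  # bash/zsh only: '|&' pipes both stdout and stderr
  perf probe |& grep -q 'is not a perf-command'

  # portable equivalent now used by the test library
  perf probe 2>&1 | grep -q 'is not a perf-command'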