From 3cd3216dbb421244b96b992f193e778a3baa2220 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 12 Apr 2019 14:38:27 +0300 Subject: perf scripts python: export-to-postgresql.py: Add support for pyside2 pyside2 is the future for pyside support. Note pyside uses Qt4 whereas pyside2 uses Qt5. Signed-off-by: Adrian Hunter Cc: Jiri Olsa Link: http://lkml.kernel.org/r/20190412113830.4126-6-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/export-to-postgresql.py | 43 ++++++++++++++++++----- 1 file changed, 34 insertions(+), 9 deletions(-) (limited to 'tools/perf/scripts/python/export-to-postgresql.py') diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index c3eae1d77d36..b2f481b0d28d 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py @@ -27,18 +27,31 @@ import datetime # # fedora: # -# $ sudo yum install postgresql postgresql-server python-pyside qt-postgresql +# $ sudo yum install postgresql postgresql-server qt-postgresql # $ sudo su - postgres -c initdb # $ sudo service postgresql start # $ sudo su - postgres -# $ createuser <your user id here> +# $ createuser -s <your user id here> # Older versions may not support -s, in which case answer the prompt below: # Shall the new role be a superuser? (y/n) y +# $ sudo yum install python-pyside +# +# Alternately, to use Python3 and/or pyside 2, one of the following: +# $ sudo yum install python3-pyside +# $ pip install --user PySide2 +# $ pip3 install --user PySide2 # # ubuntu: # -# $ sudo apt-get install postgresql python-pyside.qtsql libqt4-sql-psql +# $ sudo apt-get install postgresql # $ sudo su - postgres # $ createuser -s <your user id here> +# $ sudo apt-get install python-pyside.qtsql libqt4-sql-psql +# +# Alternately, to use Python3 and/or pyside 2, one of the following: +# +# $ sudo apt-get install python3-pyside.qtsql libqt4-sql-psql +# $ sudo apt-get install python-pyside2.qtsql libqt5sql5-psql +# $ sudo apt-get install python3-pyside2.qtsql libqt5sql5-psql # # An example of using this script with Intel PT: # @@ -199,7 +212,16 @@ import datetime # print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5)) # call_path_id = query.value(6) -from PySide.QtSql import * +pyside_version_1 = True +if not "pyside-version-1" in sys.argv: + try: + from PySide2.QtSql import * + pyside_version_1 = False + except: + pass + +if pyside_version_1: + from PySide.QtSql import * if sys.version_info < (3, 0): def toserverstr(str): @@ -255,11 +277,12 @@ def printdate(*args, **kw_args): print(datetime.datetime.today(), *args, sep=' ', **kw_args) def usage(): - printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]") - printerr("where: columns 'all' or 'branches'") - printerr(" calls 'calls' => create calls and call_paths table") - printerr(" callchains 'callchains' => create call_paths table") - raise Exception("Too few arguments") + printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]"); + printerr("where: columns 'all' or 'branches'"); + printerr(" calls 'calls' => create calls and call_paths table"); + printerr(" callchains 'callchains' => create call_paths table"); + printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1"); + raise Exception("Too few or bad arguments") if (len(sys.argv) < 2): usage() @@ -281,6 +304,8 @@ for i in range(3,len(sys.argv)): perf_db_export_calls = True elif (sys.argv[i] == "callchains"):
perf_db_export_callchains = True + elif (sys.argv[i] == "pyside-version-1"): + pass else: usage() -- cgit From ec7f448e2b2e13d1629300c5881cb3b5e0a99c2f Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Mon, 20 May 2019 14:37:23 +0300 Subject: perf scripts python: export-to-postgresql.py: Export IPC information Export cycle and instruction counts on samples and calls tables. Signed-off-by: Adrian Hunter Cc: Jiri Olsa Link: http://lkml.kernel.org/r/20190520113728.14389-18-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/export-to-postgresql.py | 36 +++++++++++++++-------- 1 file changed, 24 insertions(+), 12 deletions(-) (limited to 'tools/perf/scripts/python/export-to-postgresql.py') diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index b2f481b0d28d..93225c02117e 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py @@ -394,7 +394,9 @@ if branches: 'to_ip bigint,' 'branch_type integer,' 'in_tx boolean,' - 'call_path_id bigint)') + 'call_path_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') else: do_query(query, 'CREATE TABLE samples (' 'id bigint NOT NULL,' @@ -418,7 +420,9 @@ else: 'data_src bigint,' 'branch_type integer,' 'in_tx boolean,' - 'call_path_id bigint)') + 'call_path_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') if perf_db_export_calls or perf_db_export_callchains: do_query(query, 'CREATE TABLE call_paths (' @@ -439,7 +443,9 @@ if perf_db_export_calls: 'return_id bigint,' 'parent_call_path_id bigint,' 'flags integer,' - 'parent_id bigint)') + 'parent_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') do_query(query, 'CREATE VIEW machines_view AS ' 'SELECT ' @@ -521,6 +527,9 @@ if perf_db_export_calls: 'return_time,' 'return_time - call_time AS elapsed_time,' 'branch_count,' + 'insn_count,' + 'cyc_count,' + 'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC,' 'call_id,' 'return_id,' 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,' @@ -546,7 +555,10 @@ do_query(query, 'CREATE VIEW samples_view AS ' 'to_sym_offset,' '(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,' '(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,' - 'in_tx' + 'in_tx,' + 'insn_count,' + 'cyc_count,' + 'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC' ' FROM samples') @@ -618,10 +630,10 @@ def trace_begin(): comm_table(0, "unknown") dso_table(0, 0, "unknown", "unknown", "") symbol_table(0, 0, 0, 0, 0, "unknown") - sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) if perf_db_export_calls or perf_db_export_callchains: call_path_table(0, 0, 0, 0) - call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) unhandled_count = 0 @@ -772,11 +784,11 @@ def branch_type_table(branch_type, name, *x): value = struct.pack(fmt, 2, 4, branch_type, n, name) branch_type_file.write(value) -def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, 
time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x): +def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, insn_cnt, cyc_cnt, *x): if branches: - value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiq", 18, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id) + value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiqiqiq", 20, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt) else: - value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiq", 22, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id) + value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiqiqiq", 24, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt) sample_file.write(value) def call_path_table(cp_id, parent_id, symbol_id, ip, *x): @@ -784,7 +796,7 @@ def call_path_table(cp_id, parent_id, symbol_id, ip, *x): value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip) call_path_file.write(value) -def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, *x): - fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiq" - value = struct.pack(fmt, 12, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id) +def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, insn_cnt, cyc_cnt, *x): + fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiqiqiq" + value = struct.pack(fmt, 14, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id, 8, insn_cnt, 8, cyc_cnt) call_file.write(value) -- cgit From aba44287a224dfcfdd99ba885ca9d9acc4de0c17 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Sat, 22 Jun 2019 12:32:48 +0300 Subject: perf scripts python: export-to-postgresql.py: Export Intel PT power and ptwrite events The format of synthesized events is determined by the attribute config. For the formats for Intel PT power and ptwrite events, create tables and populate them when the synth_data handler is called. If the tables remain empty, drop them at the end. 
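For illustration, a minimal standalone sketch (the helper names are hypothetical, not part of this patch) of how a handler given the synthesized event's attr config can dispatch and pull bit-fields out of the raw buffer; the little-endian "<IQ" layout and the mwait hints/extensions positions are the same ones the patch decodes below:

import struct

def decode_mwait(raw_buf):
    # Synthesized Intel PT events carry a 32-bit field followed by a 64-bit payload.
    data = struct.unpack_from("<IQ", raw_buf)
    payload = data[1]
    hints = payload & 0xff              # MWAIT hints in bits 0-7
    extensions = (payload >> 32) & 0x3  # extensions in bits 32-33
    return hints, extensions

def decode_synth(config, raw_buf):
    # attr config selects the synthesized event type; 1 is mwait in this patch.
    if config == 1:
        return decode_mwait(raw_buf)
    return None

# Example: a payload with hints=0x20 and extensions=0x1.
buf = struct.pack("<IQ", 0, (1 << 32) | 0x20)
print(decode_synth(1, buf))  # -> (32, 1)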
The tables and views, including a combined power_events_view, will display automatically from the tables menu of the exported exported-sql-viewer.py script. Note, currently only Atoms since Gemini Lake have support for ptwrite and mwait, pwre, exstop and pwrx, but all Intel PT implementations support cbr. Signed-off-by: Adrian Hunter Cc: Jiri Olsa Link: http://lkml.kernel.org/r/20190622093248.581-8-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/export-to-postgresql.py | 251 ++++++++++++++++++++++ 1 file changed, 251 insertions(+) (limited to 'tools/perf/scripts/python/export-to-postgresql.py') diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index 93225c02117e..4447f0d7c754 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py @@ -447,6 +447,38 @@ if perf_db_export_calls: 'insn_count bigint,' 'cyc_count bigint)') +do_query(query, 'CREATE TABLE ptwrite (' + 'id bigint NOT NULL,' + 'payload bigint,' + 'exact_ip boolean)') + +do_query(query, 'CREATE TABLE cbr (' + 'id bigint NOT NULL,' + 'cbr integer,' + 'mhz integer,' + 'percent integer)') + +do_query(query, 'CREATE TABLE mwait (' + 'id bigint NOT NULL,' + 'hints integer,' + 'extensions integer)') + +do_query(query, 'CREATE TABLE pwre (' + 'id bigint NOT NULL,' + 'cstate integer,' + 'subcstate integer,' + 'hw boolean)') + +do_query(query, 'CREATE TABLE exstop (' + 'id bigint NOT NULL,' + 'exact_ip boolean)') + +do_query(query, 'CREATE TABLE pwrx (' + 'id bigint NOT NULL,' + 'deepest_cstate integer,' + 'last_cstate integer,' + 'wake_reason integer)') + do_query(query, 'CREATE VIEW machines_view AS ' 'SELECT ' 'id,' @@ -561,6 +593,104 @@ do_query(query, 'CREATE VIEW samples_view AS ' 'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC' ' FROM samples') +do_query(query, 'CREATE VIEW ptwrite_view AS ' + 'SELECT ' + 'ptwrite.id,' + 'time,' + 'cpu,' + 'to_hex(payload) AS payload_hex,' + 'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip' + ' FROM ptwrite' + ' INNER JOIN samples ON samples.id = ptwrite.id') + +do_query(query, 'CREATE VIEW cbr_view AS ' + 'SELECT ' + 'cbr.id,' + 'time,' + 'cpu,' + 'cbr,' + 'mhz,' + 'percent' + ' FROM cbr' + ' INNER JOIN samples ON samples.id = cbr.id') + +do_query(query, 'CREATE VIEW mwait_view AS ' + 'SELECT ' + 'mwait.id,' + 'time,' + 'cpu,' + 'to_hex(hints) AS hints_hex,' + 'to_hex(extensions) AS extensions_hex' + ' FROM mwait' + ' INNER JOIN samples ON samples.id = mwait.id') + +do_query(query, 'CREATE VIEW pwre_view AS ' + 'SELECT ' + 'pwre.id,' + 'time,' + 'cpu,' + 'cstate,' + 'subcstate,' + 'CASE WHEN hw=FALSE THEN \'False\' ELSE \'True\' END AS hw' + ' FROM pwre' + ' INNER JOIN samples ON samples.id = pwre.id') + +do_query(query, 'CREATE VIEW exstop_view AS ' + 'SELECT ' + 'exstop.id,' + 'time,' + 'cpu,' + 'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip' + ' FROM exstop' + ' INNER JOIN samples ON samples.id = exstop.id') + +do_query(query, 'CREATE VIEW pwrx_view AS ' + 'SELECT ' + 'pwrx.id,' + 'time,' + 'cpu,' + 'deepest_cstate,' + 'last_cstate,' + 'CASE WHEN wake_reason=1 THEN \'Interrupt\'' + ' WHEN wake_reason=2 THEN \'Timer Deadline\'' + ' WHEN wake_reason=4 THEN \'Monitored Address\'' + ' WHEN wake_reason=8 THEN \'HW\'' + ' ELSE CAST ( wake_reason AS VARCHAR(2) )' + 'END AS wake_reason' + ' FROM pwrx' + ' 
INNER JOIN samples ON samples.id = pwrx.id') + +do_query(query, 'CREATE VIEW power_events_view AS ' + 'SELECT ' + 'samples.id,' + 'samples.time,' + 'samples.cpu,' + 'selected_events.name AS event,' + 'FORMAT(\'%6s\', cbr.cbr) AS cbr,' + 'FORMAT(\'%6s\', cbr.mhz) AS MHz,' + 'FORMAT(\'%5s\', cbr.percent) AS percent,' + 'to_hex(mwait.hints) AS hints_hex,' + 'to_hex(mwait.extensions) AS extensions_hex,' + 'FORMAT(\'%3s\', pwre.cstate) AS cstate,' + 'FORMAT(\'%3s\', pwre.subcstate) AS subcstate,' + 'CASE WHEN pwre.hw=FALSE THEN \'False\' WHEN pwre.hw=TRUE THEN \'True\' ELSE NULL END AS hw,' + 'CASE WHEN exstop.exact_ip=FALSE THEN \'False\' WHEN exstop.exact_ip=TRUE THEN \'True\' ELSE NULL END AS exact_ip,' + 'FORMAT(\'%3s\', pwrx.deepest_cstate) AS deepest_cstate,' + 'FORMAT(\'%3s\', pwrx.last_cstate) AS last_cstate,' + 'CASE WHEN pwrx.wake_reason=1 THEN \'Interrupt\'' + ' WHEN pwrx.wake_reason=2 THEN \'Timer Deadline\'' + ' WHEN pwrx.wake_reason=4 THEN \'Monitored Address\'' + ' WHEN pwrx.wake_reason=8 THEN \'HW\'' + ' ELSE FORMAT(\'%2s\', pwrx.wake_reason)' + 'END AS wake_reason' + ' FROM cbr' + ' FULL JOIN mwait ON mwait.id = cbr.id' + ' FULL JOIN pwre ON pwre.id = cbr.id' + ' FULL JOIN exstop ON exstop.id = cbr.id' + ' FULL JOIN pwrx ON pwrx.id = cbr.id' + ' INNER JOIN samples ON samples.id = coalesce(cbr.id, mwait.id, pwre.id, exstop.id, pwrx.id)' + ' INNER JOIN selected_events ON selected_events.id = samples.evsel_id' + ' ORDER BY samples.id') file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0) file_trailer = b"\377\377" @@ -620,6 +750,12 @@ if perf_db_export_calls or perf_db_export_callchains: call_path_file = open_output_file("call_path_table.bin") if perf_db_export_calls: call_file = open_output_file("call_table.bin") +ptwrite_file = open_output_file("ptwrite_table.bin") +cbr_file = open_output_file("cbr_table.bin") +mwait_file = open_output_file("mwait_table.bin") +pwre_file = open_output_file("pwre_table.bin") +exstop_file = open_output_file("exstop_table.bin") +pwrx_file = open_output_file("pwrx_table.bin") def trace_begin(): printdate("Writing to intermediate files...") @@ -637,6 +773,16 @@ def trace_begin(): unhandled_count = 0 +def is_table_empty(table_name): + do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1'); + if query.next(): + return False + return True + +def drop(table_name): + do_query(query, 'DROP VIEW ' + table_name + '_view'); + do_query(query, 'DROP TABLE ' + table_name); + def trace_end(): printdate("Copying to database...") copy_output_file(evsel_file, "selected_events") @@ -652,6 +798,12 @@ def trace_end(): copy_output_file(call_path_file, "call_paths") if perf_db_export_calls: copy_output_file(call_file, "calls") + copy_output_file(ptwrite_file, "ptwrite") + copy_output_file(cbr_file, "cbr") + copy_output_file(mwait_file, "mwait") + copy_output_file(pwre_file, "pwre") + copy_output_file(exstop_file, "exstop") + copy_output_file(pwrx_file, "pwrx") printdate("Removing intermediate files...") remove_output_file(evsel_file) @@ -667,6 +819,12 @@ def trace_end(): remove_output_file(call_path_file) if perf_db_export_calls: remove_output_file(call_file) + remove_output_file(ptwrite_file) + remove_output_file(cbr_file) + remove_output_file(mwait_file) + remove_output_file(pwre_file) + remove_output_file(exstop_file) + remove_output_file(pwrx_file) os.rmdir(output_dir_name) printdate("Adding primary keys") do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)') @@ -682,6 +840,12 @@ def trace_end(): do_query(query, 'ALTER TABLE call_paths 
ADD PRIMARY KEY (id)') if perf_db_export_calls: do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE ptwrite ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE cbr ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE mwait ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE pwre ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE exstop ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE pwrx ADD PRIMARY KEY (id)') printdate("Adding foreign keys") do_query(query, 'ALTER TABLE threads ' @@ -717,6 +881,30 @@ def trace_end(): 'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)') do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') + do_query(query, 'ALTER TABLE ptwrite ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE cbr ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE mwait ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE pwre ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE exstop ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE pwrx ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + + printdate("Dropping unused tables") + if is_table_empty("ptwrite"): + drop("ptwrite") + if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"): + drop("mwait") + drop("pwre") + drop("exstop") + drop("pwrx") + do_query(query, 'DROP VIEW power_events_view'); + if is_table_empty("cbr"): + drop("cbr") if (unhandled_count): printdate("Warning: ", unhandled_count, " unhandled events") @@ -800,3 +988,66 @@ def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiqiqiq" value = struct.pack(fmt, 14, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id, 8, insn_cnt, 8, cyc_cnt) call_file.write(value) + +def ptwrite(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + flags = data[0] + payload = data[1] + exact_ip = flags & 1 + value = struct.pack("!hiqiqiB", 3, 8, id, 8, payload, 1, exact_ip) + ptwrite_file.write(value) + +def cbr(id, raw_buf): + data = struct.unpack_from("<BBBBII", raw_buf) + cbr = data[0] + MHz = (data[4] + 500) / 1000 + percent = ((cbr * 1000 / data[2]) + 5) / 10 + value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent)) + cbr_file.write(value) + +def mwait(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + hints = payload & 0xff + extensions = (payload >> 32) & 0x3 + value = struct.pack("!hiqiiii", 3, 8, id, 4, hints, 4, extensions) + mwait_file.write(value) + +def pwre(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + hw = (payload >> 7) & 1 + cstate = (payload >> 12) & 0xf + subcstate = (payload >> 8) & 0xf + value = struct.pack("!hiqiiiiiB", 4, 8, id, 4, cstate, 4, subcstate, 1, hw) + pwre_file.write(value) + +def exstop(id, raw_buf): + data = struct.unpack_from("<I", raw_buf) + flags = data[0] + exact_ip = flags & 1 + value = struct.pack("!hiqiB", 2, 8, id, 1, exact_ip) + exstop_file.write(value) + +def pwrx(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + deepest_cstate = payload & 0xf + last_cstate = (payload >> 4) & 0xf + wake_reason = (payload >> 8) & 0xf + value = struct.pack("!hiqiiiiii", 4, 8, id, 4, deepest_cstate, 4, last_cstate, 4, wake_reason) + pwrx_file.write(value) + +def synth_data(id, config, raw_buf, *x): + if config == 0: + ptwrite(id, raw_buf) + elif config == 1: + mwait(id, raw_buf) + elif config == 2: + pwre(id, raw_buf) + elif config == 3: + exstop(id, raw_buf) + elif config == 4: + pwrx(id, raw_buf) + elif config == 5: + cbr(id, raw_buf) -- cgit From d8d051df9f906232715282cc0570c94273b197bc Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Mon, 8 Jul 2019 08:52:31 +0300 Subject: perf scripts python: export-to-postgresql.py: Fix DROP VIEW power_events_view PostgreSQL can error if power_events_view is not dropped before its dependent tables e.g.
Exception: Query failed: ERROR: cannot drop table mwait because other objects depend on it DETAIL: view power_events_view depends on table mwait Signed-off-by: Adrian Hunter Cc: Jiri Olsa Fixes: aba44287a224 ("perf scripts python: export-to-postgresql.py: Export Intel PT power and ptwrite events") Link: http://lkml.kernel.org/r/20190708055232.5032-2-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/export-to-postgresql.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/perf/scripts/python/export-to-postgresql.py') diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index 4447f0d7c754..92713d93e956 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py @@ -898,11 +898,11 @@ def trace_end(): if is_table_empty("ptwrite"): drop("ptwrite") if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"): + do_query(query, 'DROP VIEW power_events_view'); drop("mwait") drop("pwre") drop("exstop") drop("pwrx") - do_query(query, 'DROP VIEW power_events_view'); if is_table_empty("cbr"): drop("cbr") -- cgit From 8534b5de81802a82b13fe05acc3e749e3baf980e Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 10 Jul 2019 11:57:59 +0300 Subject: perf scripts python: export-to-postgresql.py: Export comm details Add table columns for thread id, comm start time and exec flag. Signed-off-by: Adrian Hunter Cc: Jiri Olsa Link: http://lkml.kernel.org/r/20190710085810.1650-11-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/export-to-postgresql.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'tools/perf/scripts/python/export-to-postgresql.py') diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index 92713d93e956..01f37877f5bb 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py @@ -353,7 +353,10 @@ do_query(query, 'CREATE TABLE threads (' 'tid integer)') do_query(query, 'CREATE TABLE comms (' 'id bigint NOT NULL,' - 'comm varchar(16))') + 'comm varchar(16),' + 'c_thread_id bigint,' + 'c_time bigint,' + 'exec_flag boolean)') do_query(query, 'CREATE TABLE comm_threads (' 'id bigint NOT NULL,' 'comm_id bigint,' @@ -763,7 +766,7 @@ def trace_begin(): evsel_table(0, "unknown") machine_table(0, 0, "unknown") thread_table(0, 0, 0, -1, -1) - comm_table(0, "unknown") + comm_table(0, "unknown", 0, 0, 0) dso_table(0, 0, "unknown", "unknown", "") symbol_table(0, 0, 0, 0, 0, "unknown") sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) @@ -851,6 +854,8 @@ def trace_end(): do_query(query, 'ALTER TABLE threads ' 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)') + do_query(query, 'ALTER TABLE comms ' + 'ADD CONSTRAINT threadfk FOREIGN KEY (c_thread_id) REFERENCES threads (id)') do_query(query, 'ALTER TABLE comm_threads ' 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)') @@ -935,11 +940,11 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x): value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid) 
thread_file.write(value) -def comm_table(comm_id, comm_str, *x): +def comm_table(comm_id, comm_str, thread_id, time, exec_flag, *x): comm_str = toserverstr(comm_str) n = len(comm_str) - fmt = "!hiqi" + str(n) + "s" - value = struct.pack(fmt, 2, 8, comm_id, n, comm_str) + fmt = "!hiqi" + str(n) + "s" + "iqiqiB" + value = struct.pack(fmt, 5, 8, comm_id, n, comm_str, 8, thread_id, 8, time, 1, exec_flag) comm_file.write(value) def comm_thread_table(comm_thread_id, comm_id, thread_id, *x): -- cgit From d9efc1d25214da500d6592095b542b32c15459df Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 10 Jul 2019 11:58:03 +0300 Subject: perf scripts python: export-to-postgresql.py: Add has_calls column to comms table Now that a thread's current comm is exported, it shows up in the call graph and call tree even if it has no calls. That can happen because the calls are recorded against the main thread's initial comm. Add a table column to make it easy for the exported-sql-viewer.py script to select only comms with calls. Committer testing: $ rm -f simple-retpoline.db $ sudo ~acme/bin/perf script -i simple-retpoline.perf.data --itrace=be -s ~/libexec/perf-core/scripts/python/export-to-sqlite.py simple-retpoline.db branches calls 2019-07-10 12:25:33.200529 Creating database ... 2019-07-10 12:25:33.211548 Writing records... 2019-07-10 12:25:33.549630 Adding indexes 2019-07-10 12:25:33.560715 Dropping unused tables 2019-07-10 12:25:33.580201 Done $ sha256sum tools/perf/scripts/python/export-to-sqlite.py ~/libexec/perf-core/scripts/python/export-to-sqlite.py 2922b642c392004dffa1d8789296478c85904623f5895bcb9b6cbf33e3ca999f tools/perf/scripts/python/export-to-sqlite.py 2922b642c392004dffa1d8789296478c85904623f5895bcb9b6cbf33e3ca999f /home/acme/libexec/perf-core/scripts/python/export-to-sqlite.py $ $ sqlite3 simple-retpoline.db SQLite version 3.26.0 2018-12-01 12:34:55 Enter ".help" for usage hints. 
sqlite> .schema comms CREATE TABLE comms (id integer NOT NULL PRIMARY KEY,comm varchar(16),c_thread_id bigint,c_time bigint,exec_flag boolean, has_calls boolean); sqlite> select id,has_calls from comms; 0|1 1|1 sqlite> select distinct comm_id from calls; 0 1 sqlite> Signed-off-by: Adrian Hunter Tested-by: Arnaldo Carvalho de Melo Cc: Jiri Olsa Link: http://lkml.kernel.org/r/20190710085810.1650-15-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/export-to-postgresql.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tools/perf/scripts/python/export-to-postgresql.py') diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index 01f37877f5bb..13205e4e5b3b 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py @@ -886,6 +886,8 @@ def trace_end(): 'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)') do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') + do_query(query, 'ALTER TABLE comms ADD has_calls boolean') + do_query(query, 'UPDATE comms SET has_calls = TRUE WHERE comms.id IN (SELECT DISTINCT comm_id FROM calls)') do_query(query, 'ALTER TABLE ptwrite ' 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') do_query(query, 'ALTER TABLE cbr ' -- cgit From 56789f3dc127d4f8c07ce2bb48629ba75e8ef16c Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Wed, 10 Jul 2019 11:58:10 +0300 Subject: perf scripts python: export-to-postgresql.py: Export switch events Export switch events to a new table 'context_switches' and create a view 'context_switches_view'. The table and view will show automatically in the exported-sql-viewer.py script. If the table ends up empty, then it and the view are dropped. 
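The exporter writes each table to an intermediate file in PostgreSQL's COPY BINARY ("PGCOPY") format, which is why every *_table function packs a big-endian column count followed by a (byte length, value) pair per column. A minimal standalone sketch of that row layout for the new context_switches table (the helper name and the values are illustrative only):

import struct

def pgcopy_row(columns):
    # columns is a list of (byte_length, value) pairs: 8 for bigint, 4 for integer.
    row = struct.pack("!h", len(columns))       # 16-bit field count
    for length, value in columns:
        fmt = "!iq" if length == 8 else "!ii"   # 32-bit length, then the value
        row += struct.pack(fmt, length, value)
    return row

# One context switch row: id, machine_id, time, cpu, thread_out_id,
# comm_out_id, thread_in_id, comm_in_id, flags (values made up).
row = pgcopy_row([(8, 1), (8, 1), (8, 123456789), (4, 2),
                  (8, 10), (8, 3), (8, 11), (8, 4), (4, 1)])
# Identical to what context_switch_table() in this patch produces:
assert row == struct.pack("!hiqiqiqiiiqiqiqiqii", 9, 8, 1, 8, 1, 8, 123456789, 4, 2,
                          8, 10, 8, 3, 8, 11, 8, 4, 4, 1)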
Signed-off-by: Adrian Hunter Cc: Jiri Olsa Link: http://lkml.kernel.org/r/20190710085810.1650-22-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/export-to-postgresql.py | 51 +++++++++++++++++++++++ 1 file changed, 51 insertions(+) (limited to 'tools/perf/scripts/python/export-to-postgresql.py') diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index 13205e4e5b3b..7bd73a904b4e 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py @@ -482,6 +482,17 @@ do_query(query, 'CREATE TABLE pwrx (' 'last_cstate integer,' 'wake_reason integer)') +do_query(query, 'CREATE TABLE context_switches (' + 'id bigint NOT NULL,' + 'machine_id bigint,' + 'time bigint,' + 'cpu integer,' + 'thread_out_id bigint,' + 'comm_out_id bigint,' + 'thread_in_id bigint,' + 'comm_in_id bigint,' + 'flags integer)') + do_query(query, 'CREATE VIEW machines_view AS ' 'SELECT ' 'id,' @@ -695,6 +706,29 @@ do_query(query, 'CREATE VIEW power_events_view AS ' ' INNER JOIN selected_events ON selected_events.id = samples.evsel_id' ' ORDER BY samples.id') +do_query(query, 'CREATE VIEW context_switches_view AS ' + 'SELECT ' + 'context_switches.id,' + 'context_switches.machine_id,' + 'context_switches.time,' + 'context_switches.cpu,' + 'th_out.pid AS pid_out,' + 'th_out.tid AS tid_out,' + 'comm_out.comm AS comm_out,' + 'th_in.pid AS pid_in,' + 'th_in.tid AS tid_in,' + 'comm_in.comm AS comm_in,' + 'CASE WHEN context_switches.flags = 0 THEN \'in\'' + ' WHEN context_switches.flags = 1 THEN \'out\'' + ' WHEN context_switches.flags = 3 THEN \'out preempt\'' + ' ELSE CAST ( context_switches.flags AS VARCHAR(11) )' + 'END AS flags' + ' FROM context_switches' + ' INNER JOIN threads AS th_out ON th_out.id = context_switches.thread_out_id' + ' INNER JOIN threads AS th_in ON th_in.id = context_switches.thread_in_id' + ' INNER JOIN comms AS comm_out ON comm_out.id = context_switches.comm_out_id' + ' INNER JOIN comms AS comm_in ON comm_in.id = context_switches.comm_in_id') + file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0) file_trailer = b"\377\377" @@ -759,6 +793,7 @@ mwait_file = open_output_file("mwait_table.bin") pwre_file = open_output_file("pwre_table.bin") exstop_file = open_output_file("exstop_table.bin") pwrx_file = open_output_file("pwrx_table.bin") +context_switches_file = open_output_file("context_switches_table.bin") def trace_begin(): printdate("Writing to intermediate files...") @@ -807,6 +842,7 @@ def trace_end(): copy_output_file(pwre_file, "pwre") copy_output_file(exstop_file, "exstop") copy_output_file(pwrx_file, "pwrx") + copy_output_file(context_switches_file, "context_switches") printdate("Removing intermediate files...") remove_output_file(evsel_file) @@ -828,6 +864,7 @@ def trace_end(): remove_output_file(pwre_file) remove_output_file(exstop_file) remove_output_file(pwrx_file) + remove_output_file(context_switches_file) os.rmdir(output_dir_name) printdate("Adding primary keys") do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)') @@ -849,6 +886,7 @@ def trace_end(): do_query(query, 'ALTER TABLE pwre ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE exstop ADD PRIMARY KEY (id)') do_query(query, 'ALTER TABLE pwrx ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE context_switches ADD PRIMARY KEY (id)') printdate("Adding foreign keys") do_query(query, 'ALTER TABLE threads ' @@ -900,6 +938,12 @@ def trace_end(): 'ADD 
CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') do_query(query, 'ALTER TABLE pwrx ' 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE context_switches ' + 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' + 'ADD CONSTRAINT toutfk FOREIGN KEY (thread_out_id) REFERENCES threads (id),' + 'ADD CONSTRAINT tinfk FOREIGN KEY (thread_in_id) REFERENCES threads (id),' + 'ADD CONSTRAINT coutfk FOREIGN KEY (comm_out_id) REFERENCES comms (id),' + 'ADD CONSTRAINT cinfk FOREIGN KEY (comm_in_id) REFERENCES comms (id)') printdate("Dropping unused tables") if is_table_empty("ptwrite"): @@ -912,6 +956,8 @@ def trace_end(): drop("pwrx") if is_table_empty("cbr"): drop("cbr") + if is_table_empty("context_switches"): + drop("context_switches") if (unhandled_count): printdate("Warning: ", unhandled_count, " unhandled events") @@ -1058,3 +1104,8 @@ def synth_data(id, config, raw_buf, *x): pwrx(id, raw_buf) elif config == 5: cbr(id, raw_buf) + +def context_switch_table(id, machine_id, time, cpu, thread_out_id, comm_out_id, thread_in_id, comm_in_id, flags, *x): + fmt = "!hiqiqiqiiiqiqiqiqii" + value = struct.pack(fmt, 9, 8, id, 8, machine_id, 8, time, 4, cpu, 8, thread_out_id, 8, comm_out_id, 8, thread_in_id, 8, comm_in_id, 4, flags) + context_switches_file.write(value) -- cgit
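After the export completes, the new views can be queried with the same PySide2 (or PySide) QtSql bindings the exporter itself supports; a minimal sketch, assuming a database exported under the name "pt_example" (the database name and the row limit are illustrative only):

try:
    from PySide2.QtSql import QSqlDatabase, QSqlQuery
except ImportError:
    from PySide.QtSql import QSqlDatabase, QSqlQuery

db = QSqlDatabase.addDatabase("QPSQL")
db.setDatabaseName("pt_example")
if not db.open():
    raise Exception("Failed to open database: " + db.lastError().text())

query = QSqlQuery(db)
query.exec_("SELECT time, cpu, comm_out, comm_in, flags "
            "FROM context_switches_view ORDER BY time LIMIT 10")
while query.next():
    print(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4))

db.close()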