Diffstat (limited to 'tools/perf/scripts')

 -rw-r--r--  tools/perf/scripts/python/export-to-postgresql.py | 330
 -rw-r--r--  tools/perf/scripts/python/export-to-sqlite.py     | 319
 -rwxr-xr-x  tools/perf/scripts/python/exported-sql-viewer.py  | 345
 3 files changed, 874 insertions(+), 120 deletions(-)
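The patch below adds an optional "pyside-version-1" argument to both export scripts and a "--pyside-version-1" option to the viewer, so PySide version 1 can still be forced once PySide2 is preferred by default. As a rough usage sketch (not taken from the patch itself: the database names pt_example_db / pt_example.db are placeholders, perf.data is assumed to contain an Intel PT trace, the script paths are abbreviated, and the export scripts are assumed to be run through 'perf script -s' as in their own in-file examples):

 $ perf script -s export-to-postgresql.py pt_example_db branches calls
 $ perf script -s export-to-sqlite.py pt_example.db branches calls pyside-version-1
 $ python exported-sql-viewer.py pt_example_db
 $ python exported-sql-viewer.py --pyside-version-1 pt_example_db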
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index c3eae1d77d36..4447f0d7c754 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py @@ -27,18 +27,31 @@ import datetime # # fedora: # -# $ sudo yum install postgresql postgresql-server python-pyside qt-postgresql +# $ sudo yum install postgresql postgresql-server qt-postgresql # $ sudo su - postgres -c initdb # $ sudo service postgresql start # $ sudo su - postgres -# $ createuser <your user id here> +# $ createuser -s <your user id here> # Older versions may not support -s, in which case answer the prompt below: # Shall the new role be a superuser? (y/n) y +# $ sudo yum install python-pyside +# +# Alternately, to use Python3 and/or pyside 2, one of the following: +# $ sudo yum install python3-pyside +# $ pip install --user PySide2 +# $ pip3 install --user PySide2 # # ubuntu: # -# $ sudo apt-get install postgresql python-pyside.qtsql libqt4-sql-psql +# $ sudo apt-get install postgresql # $ sudo su - postgres # $ createuser -s <your user id here> +# $ sudo apt-get install python-pyside.qtsql libqt4-sql-psql +# +# Alternately, to use Python3 and/or pyside 2, one of the following: +# +# $ sudo apt-get install python3-pyside.qtsql libqt4-sql-psql +# $ sudo apt-get install python-pyside2.qtsql libqt5sql5-psql +# $ sudo apt-get install python3-pyside2.qtsql libqt5sql5-psql # # An example of using this script with Intel PT: # @@ -199,7 +212,16 @@ import datetime # print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5)) # call_path_id = query.value(6) -from PySide.QtSql import * +pyside_version_1 = True +if not "pyside-version-1" in sys.argv: + try: + from PySide2.QtSql import * + pyside_version_1 = False + except: + pass + +if pyside_version_1: + from PySide.QtSql import * if sys.version_info < (3, 0): def toserverstr(str): @@ -255,11 +277,12 @@ def printdate(*args, **kw_args): print(datetime.datetime.today(), *args, sep=' ', **kw_args) def usage(): - printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]") - printerr("where: columns 'all' or 'branches'") - printerr(" calls 'calls' => create calls and call_paths table") - printerr(" callchains 'callchains' => create call_paths table") - raise Exception("Too few arguments") + printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]"); + printerr("where: columns 'all' or 'branches'"); + printerr(" calls 'calls' => create calls and call_paths table"); + printerr(" callchains 'callchains' => create call_paths table"); + printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1"); + raise Exception("Too few or bad arguments") if (len(sys.argv) < 2): usage() @@ -281,6 +304,8 @@ for i in range(3,len(sys.argv)): perf_db_export_calls = True elif (sys.argv[i] == "callchains"): perf_db_export_callchains = True + elif (sys.argv[i] == "pyside-version-1"): + pass else: usage() @@ -369,7 +394,9 @@ if branches: 'to_ip bigint,' 'branch_type integer,' 'in_tx boolean,' - 'call_path_id bigint)') + 'call_path_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') else: do_query(query, 'CREATE TABLE samples (' 'id bigint NOT NULL,' @@ -393,7 +420,9 @@ else: 'data_src bigint,' 'branch_type integer,' 'in_tx boolean,' - 'call_path_id bigint)') + 'call_path_id bigint,' + 
'insn_count bigint,' + 'cyc_count bigint)') if perf_db_export_calls or perf_db_export_callchains: do_query(query, 'CREATE TABLE call_paths (' @@ -414,7 +443,41 @@ if perf_db_export_calls: 'return_id bigint,' 'parent_call_path_id bigint,' 'flags integer,' - 'parent_id bigint)') + 'parent_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') + +do_query(query, 'CREATE TABLE ptwrite (' + 'id bigint NOT NULL,' + 'payload bigint,' + 'exact_ip boolean)') + +do_query(query, 'CREATE TABLE cbr (' + 'id bigint NOT NULL,' + 'cbr integer,' + 'mhz integer,' + 'percent integer)') + +do_query(query, 'CREATE TABLE mwait (' + 'id bigint NOT NULL,' + 'hints integer,' + 'extensions integer)') + +do_query(query, 'CREATE TABLE pwre (' + 'id bigint NOT NULL,' + 'cstate integer,' + 'subcstate integer,' + 'hw boolean)') + +do_query(query, 'CREATE TABLE exstop (' + 'id bigint NOT NULL,' + 'exact_ip boolean)') + +do_query(query, 'CREATE TABLE pwrx (' + 'id bigint NOT NULL,' + 'deepest_cstate integer,' + 'last_cstate integer,' + 'wake_reason integer)') do_query(query, 'CREATE VIEW machines_view AS ' 'SELECT ' @@ -496,6 +559,9 @@ if perf_db_export_calls: 'return_time,' 'return_time - call_time AS elapsed_time,' 'branch_count,' + 'insn_count,' + 'cyc_count,' + 'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC,' 'call_id,' 'return_id,' 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,' @@ -521,9 +587,110 @@ do_query(query, 'CREATE VIEW samples_view AS ' 'to_sym_offset,' '(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,' '(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,' - 'in_tx' + 'in_tx,' + 'insn_count,' + 'cyc_count,' + 'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC' ' FROM samples') +do_query(query, 'CREATE VIEW ptwrite_view AS ' + 'SELECT ' + 'ptwrite.id,' + 'time,' + 'cpu,' + 'to_hex(payload) AS payload_hex,' + 'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip' + ' FROM ptwrite' + ' INNER JOIN samples ON samples.id = ptwrite.id') + +do_query(query, 'CREATE VIEW cbr_view AS ' + 'SELECT ' + 'cbr.id,' + 'time,' + 'cpu,' + 'cbr,' + 'mhz,' + 'percent' + ' FROM cbr' + ' INNER JOIN samples ON samples.id = cbr.id') + +do_query(query, 'CREATE VIEW mwait_view AS ' + 'SELECT ' + 'mwait.id,' + 'time,' + 'cpu,' + 'to_hex(hints) AS hints_hex,' + 'to_hex(extensions) AS extensions_hex' + ' FROM mwait' + ' INNER JOIN samples ON samples.id = mwait.id') + +do_query(query, 'CREATE VIEW pwre_view AS ' + 'SELECT ' + 'pwre.id,' + 'time,' + 'cpu,' + 'cstate,' + 'subcstate,' + 'CASE WHEN hw=FALSE THEN \'False\' ELSE \'True\' END AS hw' + ' FROM pwre' + ' INNER JOIN samples ON samples.id = pwre.id') + +do_query(query, 'CREATE VIEW exstop_view AS ' + 'SELECT ' + 'exstop.id,' + 'time,' + 'cpu,' + 'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip' + ' FROM exstop' + ' INNER JOIN samples ON samples.id = exstop.id') + +do_query(query, 'CREATE VIEW pwrx_view AS ' + 'SELECT ' + 'pwrx.id,' + 'time,' + 'cpu,' + 'deepest_cstate,' + 'last_cstate,' + 'CASE WHEN wake_reason=1 THEN \'Interrupt\'' + ' WHEN wake_reason=2 THEN \'Timer Deadline\'' + ' WHEN wake_reason=4 THEN \'Monitored Address\'' + ' WHEN wake_reason=8 THEN \'HW\'' + ' ELSE 
CAST ( wake_reason AS VARCHAR(2) )' + 'END AS wake_reason' + ' FROM pwrx' + ' INNER JOIN samples ON samples.id = pwrx.id') + +do_query(query, 'CREATE VIEW power_events_view AS ' + 'SELECT ' + 'samples.id,' + 'samples.time,' + 'samples.cpu,' + 'selected_events.name AS event,' + 'FORMAT(\'%6s\', cbr.cbr) AS cbr,' + 'FORMAT(\'%6s\', cbr.mhz) AS MHz,' + 'FORMAT(\'%5s\', cbr.percent) AS percent,' + 'to_hex(mwait.hints) AS hints_hex,' + 'to_hex(mwait.extensions) AS extensions_hex,' + 'FORMAT(\'%3s\', pwre.cstate) AS cstate,' + 'FORMAT(\'%3s\', pwre.subcstate) AS subcstate,' + 'CASE WHEN pwre.hw=FALSE THEN \'False\' WHEN pwre.hw=TRUE THEN \'True\' ELSE NULL END AS hw,' + 'CASE WHEN exstop.exact_ip=FALSE THEN \'False\' WHEN exstop.exact_ip=TRUE THEN \'True\' ELSE NULL END AS exact_ip,' + 'FORMAT(\'%3s\', pwrx.deepest_cstate) AS deepest_cstate,' + 'FORMAT(\'%3s\', pwrx.last_cstate) AS last_cstate,' + 'CASE WHEN pwrx.wake_reason=1 THEN \'Interrupt\'' + ' WHEN pwrx.wake_reason=2 THEN \'Timer Deadline\'' + ' WHEN pwrx.wake_reason=4 THEN \'Monitored Address\'' + ' WHEN pwrx.wake_reason=8 THEN \'HW\'' + ' ELSE FORMAT(\'%2s\', pwrx.wake_reason)' + 'END AS wake_reason' + ' FROM cbr' + ' FULL JOIN mwait ON mwait.id = cbr.id' + ' FULL JOIN pwre ON pwre.id = cbr.id' + ' FULL JOIN exstop ON exstop.id = cbr.id' + ' FULL JOIN pwrx ON pwrx.id = cbr.id' + ' INNER JOIN samples ON samples.id = coalesce(cbr.id, mwait.id, pwre.id, exstop.id, pwrx.id)' + ' INNER JOIN selected_events ON selected_events.id = samples.evsel_id' + ' ORDER BY samples.id') file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0) file_trailer = b"\377\377" @@ -583,6 +750,12 @@ if perf_db_export_calls or perf_db_export_callchains: call_path_file = open_output_file("call_path_table.bin") if perf_db_export_calls: call_file = open_output_file("call_table.bin") +ptwrite_file = open_output_file("ptwrite_table.bin") +cbr_file = open_output_file("cbr_table.bin") +mwait_file = open_output_file("mwait_table.bin") +pwre_file = open_output_file("pwre_table.bin") +exstop_file = open_output_file("exstop_table.bin") +pwrx_file = open_output_file("pwrx_table.bin") def trace_begin(): printdate("Writing to intermediate files...") @@ -593,13 +766,23 @@ def trace_begin(): comm_table(0, "unknown") dso_table(0, 0, "unknown", "unknown", "") symbol_table(0, 0, 0, 0, 0, "unknown") - sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) if perf_db_export_calls or perf_db_export_callchains: call_path_table(0, 0, 0, 0) - call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) unhandled_count = 0 +def is_table_empty(table_name): + do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1'); + if query.next(): + return False + return True + +def drop(table_name): + do_query(query, 'DROP VIEW ' + table_name + '_view'); + do_query(query, 'DROP TABLE ' + table_name); + def trace_end(): printdate("Copying to database...") copy_output_file(evsel_file, "selected_events") @@ -615,6 +798,12 @@ def trace_end(): copy_output_file(call_path_file, "call_paths") if perf_db_export_calls: copy_output_file(call_file, "calls") + copy_output_file(ptwrite_file, "ptwrite") + copy_output_file(cbr_file, "cbr") + copy_output_file(mwait_file, "mwait") + copy_output_file(pwre_file, "pwre") + copy_output_file(exstop_file, "exstop") + copy_output_file(pwrx_file, "pwrx") printdate("Removing intermediate files...") 
remove_output_file(evsel_file) @@ -630,6 +819,12 @@ def trace_end(): remove_output_file(call_path_file) if perf_db_export_calls: remove_output_file(call_file) + remove_output_file(ptwrite_file) + remove_output_file(cbr_file) + remove_output_file(mwait_file) + remove_output_file(pwre_file) + remove_output_file(exstop_file) + remove_output_file(pwrx_file) os.rmdir(output_dir_name) printdate("Adding primary keys") do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)') @@ -645,6 +840,12 @@ def trace_end(): do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)') if perf_db_export_calls: do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE ptwrite ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE cbr ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE mwait ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE pwre ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE exstop ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE pwrx ADD PRIMARY KEY (id)') printdate("Adding foreign keys") do_query(query, 'ALTER TABLE threads ' @@ -680,6 +881,30 @@ def trace_end(): 'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)') do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') + do_query(query, 'ALTER TABLE ptwrite ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE cbr ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE mwait ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE pwre ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE exstop ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE pwrx ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + + printdate("Dropping unused tables") + if is_table_empty("ptwrite"): + drop("ptwrite") + if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"): + drop("mwait") + drop("pwre") + drop("exstop") + drop("pwrx") + do_query(query, 'DROP VIEW power_events_view'); + if is_table_empty("cbr"): + drop("cbr") if (unhandled_count): printdate("Warning: ", unhandled_count, " unhandled events") @@ -747,11 +972,11 @@ def branch_type_table(branch_type, name, *x): value = struct.pack(fmt, 2, 4, branch_type, n, name) branch_type_file.write(value) -def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x): +def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, insn_cnt, cyc_cnt, *x): if branches: - value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiq", 18, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id) + value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiqiqiq", 20, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, 
dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt) else: - value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiq", 22, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id) + value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiqiqiq", 24, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt) sample_file.write(value) def call_path_table(cp_id, parent_id, symbol_id, ip, *x): @@ -759,7 +984,70 @@ def call_path_table(cp_id, parent_id, symbol_id, ip, *x): value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip) call_path_file.write(value) -def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, *x): - fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiq" - value = struct.pack(fmt, 12, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id) +def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, insn_cnt, cyc_cnt, *x): + fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiqiqiq" + value = struct.pack(fmt, 14, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id, 8, insn_cnt, 8, cyc_cnt) call_file.write(value) + +def ptwrite(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + flags = data[0] + payload = data[1] + exact_ip = flags & 1 + value = struct.pack("!hiqiqiB", 3, 8, id, 8, payload, 1, exact_ip) + ptwrite_file.write(value) + +def cbr(id, raw_buf): + data = struct.unpack_from("<BBBBII", raw_buf) + cbr = data[0] + MHz = (data[4] + 500) / 1000 + percent = ((cbr * 1000 / data[2]) + 5) / 10 + value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, MHz, 4, percent) + cbr_file.write(value) + +def mwait(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + hints = payload & 0xff + extensions = (payload >> 32) & 0x3 + value = struct.pack("!hiqiiii", 3, 8, id, 4, hints, 4, extensions) + mwait_file.write(value) + +def pwre(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + hw = (payload >> 7) & 1 + cstate = (payload >> 12) & 0xf + subcstate = (payload >> 8) & 0xf + value = struct.pack("!hiqiiiiiB", 4, 8, id, 4, cstate, 4, subcstate, 1, hw) + pwre_file.write(value) + +def exstop(id, raw_buf): + data = struct.unpack_from("<I", raw_buf) + flags = data[0] + exact_ip = flags & 1 + value = struct.pack("!hiqiB", 2, 8, id, 1, exact_ip) + exstop_file.write(value) + +def pwrx(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + deepest_cstate = payload & 0xf + last_cstate = (payload >> 4) & 0xf + wake_reason = (payload >> 8) & 0xf + value = struct.pack("!hiqiiiiii", 4, 
8, id, 4, deepest_cstate, 4, last_cstate, 4, wake_reason) + pwrx_file.write(value) + +def synth_data(id, config, raw_buf, *x): + if config == 0: + ptwrite(id, raw_buf) + elif config == 1: + mwait(id, raw_buf) + elif config == 2: + pwre(id, raw_buf) + elif config == 3: + exstop(id, raw_buf) + elif config == 4: + pwrx(id, raw_buf) + elif config == 5: + cbr(id, raw_buf) diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py index bf271fbc3a88..3222a83f4184 100644 --- a/tools/perf/scripts/python/export-to-sqlite.py +++ b/tools/perf/scripts/python/export-to-sqlite.py @@ -21,6 +21,26 @@ import datetime # provides LGPL-licensed Python bindings for Qt. You will also need the package # libqt4-sql-sqlite for Qt sqlite3 support. # +# Examples of installing pyside: +# +# ubuntu: +# +# $ sudo apt-get install python-pyside.qtsql libqt4-sql-psql +# +# Alternately, to use Python3 and/or pyside 2, one of the following: +# +# $ sudo apt-get install python3-pyside.qtsql libqt4-sql-psql +# $ sudo apt-get install python-pyside2.qtsql libqt5sql5-psql +# $ sudo apt-get install python3-pyside2.qtsql libqt5sql5-psql +# fedora: +# +# $ sudo yum install python-pyside +# +# Alternately, to use Python3 and/or pyside 2, one of the following: +# $ sudo yum install python3-pyside +# $ pip install --user PySide2 +# $ pip3 install --user PySide2 +# # An example of using this script with Intel PT: # # $ perf record -e intel_pt//u ls @@ -49,7 +69,16 @@ import datetime # difference is the 'transaction' column of the 'samples' table which is # renamed 'transaction_' in sqlite because 'transaction' is a reserved word. -from PySide.QtSql import * +pyside_version_1 = True +if not "pyside-version-1" in sys.argv: + try: + from PySide2.QtSql import * + pyside_version_1 = False + except: + pass + +if pyside_version_1: + from PySide.QtSql import * sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') @@ -69,11 +98,12 @@ def printdate(*args, **kw_args): print(datetime.datetime.today(), *args, sep=' ', **kw_args) def usage(): - printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"); - printerr("where: columns 'all' or 'branches'"); - printerr(" calls 'calls' => create calls and call_paths table"); - printerr(" callchains 'callchains' => create call_paths table"); - raise Exception("Too few arguments") + printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]"); + printerr("where: columns 'all' or 'branches'"); + printerr(" calls 'calls' => create calls and call_paths table"); + printerr(" callchains 'callchains' => create call_paths table"); + printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1"); + raise Exception("Too few or bad arguments") if (len(sys.argv) < 2): usage() @@ -95,6 +125,8 @@ for i in range(3,len(sys.argv)): perf_db_export_calls = True elif (sys.argv[i] == "callchains"): perf_db_export_callchains = True + elif (sys.argv[i] == "pyside-version-1"): + pass else: usage() @@ -186,7 +218,9 @@ if branches: 'to_ip bigint,' 'branch_type integer,' 'in_tx boolean,' - 'call_path_id bigint)') + 'call_path_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') else: do_query(query, 'CREATE TABLE samples (' 'id integer NOT NULL PRIMARY KEY,' @@ -210,7 +244,9 @@ else: 'data_src bigint,' 'branch_type integer,' 'in_tx boolean,' - 'call_path_id bigint)') + 'call_path_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') 
if perf_db_export_calls or perf_db_export_callchains: do_query(query, 'CREATE TABLE call_paths (' @@ -231,7 +267,41 @@ if perf_db_export_calls: 'return_id bigint,' 'parent_call_path_id bigint,' 'flags integer,' - 'parent_id bigint)') + 'parent_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') + +do_query(query, 'CREATE TABLE ptwrite (' + 'id integer NOT NULL PRIMARY KEY,' + 'payload bigint,' + 'exact_ip integer)') + +do_query(query, 'CREATE TABLE cbr (' + 'id integer NOT NULL PRIMARY KEY,' + 'cbr integer,' + 'mhz integer,' + 'percent integer)') + +do_query(query, 'CREATE TABLE mwait (' + 'id integer NOT NULL PRIMARY KEY,' + 'hints integer,' + 'extensions integer)') + +do_query(query, 'CREATE TABLE pwre (' + 'id integer NOT NULL PRIMARY KEY,' + 'cstate integer,' + 'subcstate integer,' + 'hw integer)') + +do_query(query, 'CREATE TABLE exstop (' + 'id integer NOT NULL PRIMARY KEY,' + 'exact_ip integer)') + +do_query(query, 'CREATE TABLE pwrx (' + 'id integer NOT NULL PRIMARY KEY,' + 'deepest_cstate integer,' + 'last_cstate integer,' + 'wake_reason integer)') # printf was added to sqlite in version 3.8.3 sqlite_has_printf = False @@ -327,6 +397,9 @@ if perf_db_export_calls: 'return_time,' 'return_time - call_time AS elapsed_time,' 'branch_count,' + 'insn_count,' + 'cyc_count,' + 'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC,' 'call_id,' 'return_id,' 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,' @@ -352,9 +425,108 @@ do_query(query, 'CREATE VIEW samples_view AS ' 'to_sym_offset,' '(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,' '(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,' - 'in_tx' + 'in_tx,' + 'insn_count,' + 'cyc_count,' + 'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC' ' FROM samples') +do_query(query, 'CREATE VIEW ptwrite_view AS ' + 'SELECT ' + 'ptwrite.id,' + 'time,' + 'cpu,' + + emit_to_hex('payload') + ' AS payload_hex,' + 'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip' + ' FROM ptwrite' + ' INNER JOIN samples ON samples.id = ptwrite.id') + +do_query(query, 'CREATE VIEW cbr_view AS ' + 'SELECT ' + 'cbr.id,' + 'time,' + 'cpu,' + 'cbr,' + 'mhz,' + 'percent' + ' FROM cbr' + ' INNER JOIN samples ON samples.id = cbr.id') + +do_query(query, 'CREATE VIEW mwait_view AS ' + 'SELECT ' + 'mwait.id,' + 'time,' + 'cpu,' + + emit_to_hex('hints') + ' AS hints_hex,' + + emit_to_hex('extensions') + ' AS extensions_hex' + ' FROM mwait' + ' INNER JOIN samples ON samples.id = mwait.id') + +do_query(query, 'CREATE VIEW pwre_view AS ' + 'SELECT ' + 'pwre.id,' + 'time,' + 'cpu,' + 'cstate,' + 'subcstate,' + 'CASE WHEN hw=0 THEN \'False\' ELSE \'True\' END AS hw' + ' FROM pwre' + ' INNER JOIN samples ON samples.id = pwre.id') + +do_query(query, 'CREATE VIEW exstop_view AS ' + 'SELECT ' + 'exstop.id,' + 'time,' + 'cpu,' + 'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip' + ' FROM exstop' + ' INNER JOIN samples ON samples.id = exstop.id') + +do_query(query, 'CREATE VIEW pwrx_view AS ' + 'SELECT ' + 'pwrx.id,' + 'time,' + 'cpu,' + 'deepest_cstate,' + 'last_cstate,' + 'CASE WHEN wake_reason=1 THEN \'Interrupt\'' + ' WHEN wake_reason=2 THEN \'Timer Deadline\'' + ' WHEN wake_reason=4 THEN \'Monitored Address\'' + ' WHEN wake_reason=8 THEN \'HW\'' + ' ELSE 
wake_reason ' + 'END AS wake_reason' + ' FROM pwrx' + ' INNER JOIN samples ON samples.id = pwrx.id') + +do_query(query, 'CREATE VIEW power_events_view AS ' + 'SELECT ' + 'samples.id,' + 'time,' + 'cpu,' + 'selected_events.name AS event,' + 'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT cbr FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS cbr,' + 'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT mhz FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS mhz,' + 'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT percent FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS percent,' + 'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('hints') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS hints_hex,' + 'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('extensions') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS extensions_hex,' + 'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT cstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS cstate,' + 'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT subcstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS subcstate,' + 'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT hw FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS hw,' + 'CASE WHEN selected_events.name=\'exstop\' THEN (SELECT exact_ip FROM exstop WHERE exstop.id = samples.id) ELSE "" END AS exact_ip,' + 'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT deepest_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS deepest_cstate,' + 'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT last_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS last_cstate,' + 'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT ' + 'CASE WHEN wake_reason=1 THEN \'Interrupt\'' + ' WHEN wake_reason=2 THEN \'Timer Deadline\'' + ' WHEN wake_reason=4 THEN \'Monitored Address\'' + ' WHEN wake_reason=8 THEN \'HW\'' + ' ELSE wake_reason ' + 'END' + ' FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS wake_reason' + ' FROM samples' + ' INNER JOIN selected_events ON selected_events.id = evsel_id' + ' WHERE selected_events.name IN (\'cbr\',\'mwait\',\'exstop\',\'pwre\',\'pwrx\')') + do_query(query, 'END TRANSACTION') evsel_query = QSqlQuery(db) @@ -375,15 +547,27 @@ branch_type_query = QSqlQuery(db) branch_type_query.prepare("INSERT INTO branch_types VALUES (?, ?)") sample_query = QSqlQuery(db) if branches: - sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") + sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") else: - sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") + sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") if perf_db_export_calls or perf_db_export_callchains: call_path_query = QSqlQuery(db) call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)") if perf_db_export_calls: call_query = QSqlQuery(db) - call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") + call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") +ptwrite_query = QSqlQuery(db) +ptwrite_query.prepare("INSERT INTO ptwrite VALUES (?, ?, ?)") +cbr_query = QSqlQuery(db) +cbr_query.prepare("INSERT INTO cbr VALUES (?, ?, ?, ?)") +mwait_query = QSqlQuery(db) +mwait_query.prepare("INSERT INTO mwait VALUES (?, 
?, ?)") +pwre_query = QSqlQuery(db) +pwre_query.prepare("INSERT INTO pwre VALUES (?, ?, ?, ?)") +exstop_query = QSqlQuery(db) +exstop_query.prepare("INSERT INTO exstop VALUES (?, ?)") +pwrx_query = QSqlQuery(db) +pwrx_query.prepare("INSERT INTO pwrx VALUES (?, ?, ?, ?)") def trace_begin(): printdate("Writing records...") @@ -395,13 +579,23 @@ def trace_begin(): comm_table(0, "unknown") dso_table(0, 0, "unknown", "unknown", "") symbol_table(0, 0, 0, 0, 0, "unknown") - sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) if perf_db_export_calls or perf_db_export_callchains: call_path_table(0, 0, 0, 0) - call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) unhandled_count = 0 +def is_table_empty(table_name): + do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1'); + if query.next(): + return False + return True + +def drop(table_name): + do_query(query, 'DROP VIEW ' + table_name + '_view'); + do_query(query, 'DROP TABLE ' + table_name); + def trace_end(): do_query(query, 'END TRANSACTION') @@ -410,6 +604,18 @@ def trace_end(): do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') + printdate("Dropping unused tables") + if is_table_empty("ptwrite"): + drop("ptwrite") + if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"): + drop("mwait") + drop("pwre") + drop("exstop") + drop("pwrx") + do_query(query, 'DROP VIEW power_events_view'); + if is_table_empty("cbr"): + drop("cbr") + if (unhandled_count): printdate("Warning: ", unhandled_count, " unhandled events") printdate("Done") @@ -454,14 +660,91 @@ def sample_table(*x): if branches: for xx in x[0:15]: sample_query.addBindValue(str(xx)) - for xx in x[19:22]: + for xx in x[19:24]: sample_query.addBindValue(str(xx)) do_query_(sample_query) else: - bind_exec(sample_query, 22, x) + bind_exec(sample_query, 24, x) def call_path_table(*x): bind_exec(call_path_query, 4, x) def call_return_table(*x): - bind_exec(call_query, 12, x) + bind_exec(call_query, 14, x) + +def ptwrite(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + flags = data[0] + payload = data[1] + exact_ip = flags & 1 + ptwrite_query.addBindValue(str(id)) + ptwrite_query.addBindValue(str(payload)) + ptwrite_query.addBindValue(str(exact_ip)) + do_query_(ptwrite_query) + +def cbr(id, raw_buf): + data = struct.unpack_from("<BBBBII", raw_buf) + cbr = data[0] + MHz = (data[4] + 500) / 1000 + percent = ((cbr * 1000 / data[2]) + 5) / 10 + cbr_query.addBindValue(str(id)) + cbr_query.addBindValue(str(cbr)) + cbr_query.addBindValue(str(MHz)) + cbr_query.addBindValue(str(percent)) + do_query_(cbr_query) + +def mwait(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + hints = payload & 0xff + extensions = (payload >> 32) & 0x3 + mwait_query.addBindValue(str(id)) + mwait_query.addBindValue(str(hints)) + mwait_query.addBindValue(str(extensions)) + do_query_(mwait_query) + +def pwre(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + hw = (payload >> 7) & 1 + cstate = (payload >> 12) & 0xf + subcstate = (payload >> 8) & 0xf + pwre_query.addBindValue(str(id)) + pwre_query.addBindValue(str(cstate)) + pwre_query.addBindValue(str(subcstate)) + pwre_query.addBindValue(str(hw)) + do_query_(pwre_query) + +def exstop(id, raw_buf): + 
data = struct.unpack_from("<I", raw_buf) + flags = data[0] + exact_ip = flags & 1 + exstop_query.addBindValue(str(id)) + exstop_query.addBindValue(str(exact_ip)) + do_query_(exstop_query) + +def pwrx(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + deepest_cstate = payload & 0xf + last_cstate = (payload >> 4) & 0xf + wake_reason = (payload >> 8) & 0xf + pwrx_query.addBindValue(str(id)) + pwrx_query.addBindValue(str(deepest_cstate)) + pwrx_query.addBindValue(str(last_cstate)) + pwrx_query.addBindValue(str(wake_reason)) + do_query_(pwrx_query) + +def synth_data(id, config, raw_buf, *x): + if config == 0: + ptwrite(id, raw_buf) + elif config == 1: + mwait(id, raw_buf) + elif config == 2: + pwre(id, raw_buf) + elif config == 3: + exstop(id, raw_buf) + elif config == 4: + pwrx(id, raw_buf) + elif config == 5: + cbr(id, raw_buf) diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py index affed7d149be..6e7934f2ac9a 100755 --- a/tools/perf/scripts/python/exported-sql-viewer.py +++ b/tools/perf/scripts/python/exported-sql-viewer.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python # SPDX-License-Identifier: GPL-2.0 # exported-sql-viewer.py: view data from sql database # Copyright (c) 2014-2018, Intel Corporation. @@ -91,6 +91,7 @@ from __future__ import print_function import sys +import argparse import weakref import threading import string @@ -104,10 +105,23 @@ except ImportError: glb_nsz = 16 import re import os -from PySide.QtCore import * -from PySide.QtGui import * -from PySide.QtSql import * + pyside_version_1 = True +if not "--pyside-version-1" in sys.argv: + try: + from PySide2.QtCore import * + from PySide2.QtGui import * + from PySide2.QtSql import * + from PySide2.QtWidgets import * + pyside_version_1 = False + except: + pass + +if pyside_version_1: + from PySide.QtCore import * + from PySide.QtGui import * + from PySide.QtSql import * + from decimal import * from ctypes import * from multiprocessing import Process, Array, Value, Event @@ -186,9 +200,10 @@ class Thread(QThread): class TreeModel(QAbstractItemModel): - def __init__(self, glb, parent=None): + def __init__(self, glb, params, parent=None): super(TreeModel, self).__init__(parent) self.glb = glb + self.params = params self.root = self.GetRoot() self.last_row_read = 0 @@ -385,6 +400,7 @@ class FindBar(): def Activate(self): self.bar.show() + self.textbox.lineEdit().selectAll() self.textbox.setFocus() def Deactivate(self): @@ -449,8 +465,9 @@ class FindBar(): class CallGraphLevelItemBase(object): - def __init__(self, glb, row, parent_item): + def __init__(self, glb, params, row, parent_item): self.glb = glb + self.params = params self.row = row self.parent_item = parent_item self.query_done = False; @@ -489,18 +506,24 @@ class CallGraphLevelItemBase(object): class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase): - def __init__(self, glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item): - super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, row, parent_item) + def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item): + super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item) self.comm_id = comm_id self.thread_id = thread_id self.call_path_id = call_path_id + self.insn_cnt = insn_cnt + self.cyc_cnt = cyc_cnt self.branch_count = branch_count self.time = time def Select(self): self.query_done = True; query = 
QSqlQuery(self.glb.db) - QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time), SUM(branch_count)" + if self.params.have_ipc: + ipc_str = ", SUM(insn_count), SUM(cyc_count)" + else: + ipc_str = "" + QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time)" + ipc_str + ", SUM(branch_count)" " FROM calls" " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" @@ -511,7 +534,15 @@ class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase): " GROUP BY call_path_id, name, short_name" " ORDER BY call_path_id") while query.next(): - child_item = CallGraphLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self) + if self.params.have_ipc: + insn_cnt = int(query.value(5)) + cyc_cnt = int(query.value(6)) + branch_count = int(query.value(7)) + else: + insn_cnt = 0 + cyc_cnt = 0 + branch_count = int(query.value(5)) + child_item = CallGraphLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self) self.child_items.append(child_item) self.child_count += 1 @@ -519,37 +550,57 @@ class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase): class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase): - def __init__(self, glb, row, comm_id, thread_id, call_path_id, name, dso, count, time, branch_count, parent_item): - super(CallGraphLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item) + def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item): + super(CallGraphLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item) dso = dsoname(dso) - self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ] + if self.params.have_ipc: + insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt) + cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt) + br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count) + ipc = CalcIPC(cyc_cnt, insn_cnt) + self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ] + else: + self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ] self.dbid = call_path_id # Context-sensitive call graph data model level two item class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase): - def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item): - super(CallGraphLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 1, 0, 0, parent_item) - self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""] + def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item): + super(CallGraphLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 1, 0, 0, 0, 0, parent_item) + if self.params.have_ipc: + self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""] + 
else: + self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""] self.dbid = thread_id def Select(self): super(CallGraphLevelTwoItem, self).Select() for child_item in self.child_items: self.time += child_item.time + self.insn_cnt += child_item.insn_cnt + self.cyc_cnt += child_item.cyc_cnt self.branch_count += child_item.branch_count for child_item in self.child_items: child_item.data[4] = PercentToOneDP(child_item.time, self.time) - child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count) + if self.params.have_ipc: + child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt) + child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt) + child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count) + else: + child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count) # Context-sensitive call graph data model level one item class CallGraphLevelOneItem(CallGraphLevelItemBase): - def __init__(self, glb, row, comm_id, comm, parent_item): - super(CallGraphLevelOneItem, self).__init__(glb, row, parent_item) - self.data = [comm, "", "", "", "", "", ""] + def __init__(self, glb, params, row, comm_id, comm, parent_item): + super(CallGraphLevelOneItem, self).__init__(glb, params, row, parent_item) + if self.params.have_ipc: + self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""] + else: + self.data = [comm, "", "", "", "", "", ""] self.dbid = comm_id def Select(self): @@ -560,7 +611,7 @@ class CallGraphLevelOneItem(CallGraphLevelItemBase): " INNER JOIN threads ON thread_id = threads.id" " WHERE comm_id = " + str(self.dbid)) while query.next(): - child_item = CallGraphLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self) + child_item = CallGraphLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self) self.child_items.append(child_item) self.child_count += 1 @@ -568,8 +619,8 @@ class CallGraphLevelOneItem(CallGraphLevelItemBase): class CallGraphRootItem(CallGraphLevelItemBase): - def __init__(self, glb): - super(CallGraphRootItem, self).__init__(glb, 0, None) + def __init__(self, glb, params): + super(CallGraphRootItem, self).__init__(glb, params, 0, None) self.dbid = 0 self.query_done = True; query = QSqlQuery(glb.db) @@ -577,16 +628,23 @@ class CallGraphRootItem(CallGraphLevelItemBase): while query.next(): if not query.value(0): continue - child_item = CallGraphLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self) + child_item = CallGraphLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self) self.child_items.append(child_item) self.child_count += 1 +# Call graph model parameters + +class CallGraphModelParams(): + + def __init__(self, glb, parent=None): + self.have_ipc = IsSelectable(glb.db, "calls", columns = "insn_count, cyc_count") + # Context-sensitive call graph data model base class CallGraphModelBase(TreeModel): def __init__(self, glb, parent=None): - super(CallGraphModelBase, self).__init__(glb, parent) + super(CallGraphModelBase, self).__init__(glb, CallGraphModelParams(glb), parent) def FindSelect(self, value, pattern, query): if pattern: @@ -668,17 +726,26 @@ class CallGraphModel(CallGraphModelBase): super(CallGraphModel, self).__init__(glb, parent) def GetRoot(self): - return CallGraphRootItem(self.glb) + return CallGraphRootItem(self.glb, self.params) def columnCount(self, parent=None): - return 7 + if self.params.have_ipc: + 
return 12 + else: + return 7 def columnHeader(self, column): - headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "] + if self.params.have_ipc: + headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "] + else: + headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "] return headers[column] def columnAlignment(self, column): - alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] + if self.params.have_ipc: + alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] + else: + alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] return alignment[column] def DoFindSelect(self, query, match): @@ -715,11 +782,13 @@ class CallGraphModel(CallGraphModelBase): class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase): - def __init__(self, glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item): - super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, row, parent_item) + def __init__(self, glb, params, row, comm_id, thread_id, calls_id, time, insn_cnt, cyc_cnt, branch_count, parent_item): + super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item) self.comm_id = comm_id self.thread_id = thread_id self.calls_id = calls_id + self.insn_cnt = insn_cnt + self.cyc_cnt = cyc_cnt self.branch_count = branch_count self.time = time @@ -729,8 +798,12 @@ class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase): comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id) else: comm_thread = "" + if self.params.have_ipc: + ipc_str = ", insn_count, cyc_count" + else: + ipc_str = "" query = QSqlQuery(self.glb.db) - QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time, branch_count" + QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time" + ipc_str + ", branch_count" " FROM calls" " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" @@ -738,7 +811,15 @@ class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase): " WHERE calls.parent_id = " + str(self.calls_id) + comm_thread + " ORDER BY call_time, calls.id") while query.next(): - child_item = CallTreeLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self) + if self.params.have_ipc: + insn_cnt = int(query.value(5)) + cyc_cnt = int(query.value(6)) + branch_count = int(query.value(7)) + else: + insn_cnt = 0 + cyc_cnt = 0 + branch_count = int(query.value(5)) + child_item = CallTreeLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self) self.child_items.append(child_item) self.child_count += 1 @@ -746,37 +827,57 @@ class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase): class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase): - def __init__(self, glb, row, comm_id, thread_id, calls_id, name, dso, 
count, time, branch_count, parent_item): - super(CallTreeLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item) + def __init__(self, glb, params, row, comm_id, thread_id, calls_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item): + super(CallTreeLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, calls_id, time, insn_cnt, cyc_cnt, branch_count, parent_item) dso = dsoname(dso) - self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ] + if self.params.have_ipc: + insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt) + cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt) + br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count) + ipc = CalcIPC(cyc_cnt, insn_cnt) + self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ] + else: + self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ] self.dbid = calls_id # Call tree data model level two item class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase): - def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item): - super(CallTreeLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 0, 0, 0, parent_item) - self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""] + def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item): + super(CallTreeLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 0, 0, 0, 0, 0, parent_item) + if self.params.have_ipc: + self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""] + else: + self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""] self.dbid = thread_id def Select(self): super(CallTreeLevelTwoItem, self).Select() for child_item in self.child_items: self.time += child_item.time + self.insn_cnt += child_item.insn_cnt + self.cyc_cnt += child_item.cyc_cnt self.branch_count += child_item.branch_count for child_item in self.child_items: child_item.data[4] = PercentToOneDP(child_item.time, self.time) - child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count) + if self.params.have_ipc: + child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt) + child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt) + child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count) + else: + child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count) # Call tree data model level one item class CallTreeLevelOneItem(CallGraphLevelItemBase): - def __init__(self, glb, row, comm_id, comm, parent_item): - super(CallTreeLevelOneItem, self).__init__(glb, row, parent_item) - self.data = [comm, "", "", "", "", "", ""] + def __init__(self, glb, params, row, comm_id, comm, parent_item): + super(CallTreeLevelOneItem, self).__init__(glb, params, row, parent_item) + if self.params.have_ipc: + self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""] + else: + self.data = [comm, "", "", "", "", "", ""] self.dbid = comm_id def Select(self): @@ -787,7 +888,7 @@ class CallTreeLevelOneItem(CallGraphLevelItemBase): " INNER JOIN threads ON thread_id = threads.id" " WHERE comm_id = " + str(self.dbid)) while query.next(): - child_item = 
CallTreeLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self) + child_item = CallTreeLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self) self.child_items.append(child_item) self.child_count += 1 @@ -795,8 +896,8 @@ class CallTreeLevelOneItem(CallGraphLevelItemBase): class CallTreeRootItem(CallGraphLevelItemBase): - def __init__(self, glb): - super(CallTreeRootItem, self).__init__(glb, 0, None) + def __init__(self, glb, params): + super(CallTreeRootItem, self).__init__(glb, params, 0, None) self.dbid = 0 self.query_done = True; query = QSqlQuery(glb.db) @@ -804,7 +905,7 @@ class CallTreeRootItem(CallGraphLevelItemBase): while query.next(): if not query.value(0): continue - child_item = CallTreeLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self) + child_item = CallTreeLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self) self.child_items.append(child_item) self.child_count += 1 @@ -816,17 +917,26 @@ class CallTreeModel(CallGraphModelBase): super(CallTreeModel, self).__init__(glb, parent) def GetRoot(self): - return CallTreeRootItem(self.glb) + return CallTreeRootItem(self.glb, self.params) def columnCount(self, parent=None): - return 7 + if self.params.have_ipc: + return 12 + else: + return 7 def columnHeader(self, column): - headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "] + if self.params.have_ipc: + headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "] + else: + headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "] return headers[column] def columnAlignment(self, column): - alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] + if self.params.have_ipc: + alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] + else: + alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] return alignment[column] def DoFindSelect(self, query, match): @@ -1355,11 +1465,11 @@ class FetchMoreRecordsBar(): class BranchLevelTwoItem(): - def __init__(self, row, text, parent_item): + def __init__(self, row, col, text, parent_item): self.row = row self.parent_item = parent_item - self.data = [""] * 8 - self.data[7] = text + self.data = [""] * (col + 1) + self.data[col] = text self.level = 2 def getParentItem(self): @@ -1391,6 +1501,7 @@ class BranchLevelOneItem(): self.dbid = data[0] self.level = 1 self.query_done = False + self.br_col = len(self.data) - 1 def getChildItem(self, row): return self.child_items[row] @@ -1471,7 +1582,7 @@ class BranchLevelOneItem(): while k < 15: byte_str += " " k += 1 - self.child_items.append(BranchLevelTwoItem(0, byte_str + " " + text, self)) + self.child_items.append(BranchLevelTwoItem(0, self.br_col, byte_str + " " + text, self)) self.child_count += 1 else: return @@ -1522,16 +1633,37 @@ class BranchRootItem(): def getData(self, column): return "" +# Calculate instructions per cycle + +def CalcIPC(cyc_cnt, insn_cnt): + if cyc_cnt and insn_cnt: + ipc = Decimal(float(insn_cnt) / cyc_cnt) + ipc = 
str(ipc.quantize(Decimal(".01"), rounding=ROUND_HALF_UP)) + else: + ipc = "0" + return ipc + # Branch data preparation -def BranchDataPrep(query): - data = [] - for i in xrange(0, 8): - data.append(query.value(i)) +def BranchDataPrepBr(query, data): data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) + " (" + dsoname(query.value(11)) + ")" + " -> " + tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) + " (" + dsoname(query.value(15)) + ")") + +def BranchDataPrepIPC(query, data): + insn_cnt = query.value(16) + cyc_cnt = query.value(17) + ipc = CalcIPC(cyc_cnt, insn_cnt) + data.append(insn_cnt) + data.append(cyc_cnt) + data.append(ipc) + +def BranchDataPrep(query): + data = [] + for i in xrange(0, 8): + data.append(query.value(i)) + BranchDataPrepBr(query, data) return data def BranchDataPrepWA(query): @@ -1541,10 +1673,26 @@ def BranchDataPrepWA(query): data.append("{:>19}".format(query.value(1))) for i in xrange(2, 8): data.append(query.value(i)) - data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) + - " (" + dsoname(query.value(11)) + ")" + " -> " + - tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) + - " (" + dsoname(query.value(15)) + ")") + BranchDataPrepBr(query, data) + return data + +def BranchDataWithIPCPrep(query): + data = [] + for i in xrange(0, 8): + data.append(query.value(i)) + BranchDataPrepIPC(query, data) + BranchDataPrepBr(query, data) + return data + +def BranchDataWithIPCPrepWA(query): + data = [] + data.append(query.value(0)) + # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string + data.append("{:>19}".format(query.value(1))) + for i in xrange(2, 8): + data.append(query.value(i)) + BranchDataPrepIPC(query, data) + BranchDataPrepBr(query, data) return data # Branch data model @@ -1554,14 +1702,24 @@ class BranchModel(TreeModel): progress = Signal(object) def __init__(self, glb, event_id, where_clause, parent=None): - super(BranchModel, self).__init__(glb, parent) + super(BranchModel, self).__init__(glb, None, parent) self.event_id = event_id self.more = True self.populated = 0 + self.have_ipc = IsSelectable(glb.db, "samples", columns = "insn_count, cyc_count") + if self.have_ipc: + select_ipc = ", insn_count, cyc_count" + prep_fn = BranchDataWithIPCPrep + prep_wa_fn = BranchDataWithIPCPrepWA + else: + select_ipc = "" + prep_fn = BranchDataPrep + prep_wa_fn = BranchDataPrepWA sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name," " CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END," " ip, symbols.name, sym_offset, dsos.short_name," " to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name" + + select_ipc + " FROM samples" " INNER JOIN comms ON comm_id = comms.id" " INNER JOIN threads ON thread_id = threads.id" @@ -1575,9 +1733,9 @@ class BranchModel(TreeModel): " ORDER BY samples.id" " LIMIT " + str(glb_chunk_sz)) if pyside_version_1 and sys.version_info[0] == 3: - prep = BranchDataPrepWA + prep = prep_fn else: - prep = BranchDataPrep + prep = prep_wa_fn self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample) self.fetcher.done.connect(self.Update) self.fetcher.Fetch(glb_chunk_sz) @@ -1586,13 +1744,23 @@ class BranchModel(TreeModel): return BranchRootItem() def columnCount(self, parent=None): - return 8 + if self.have_ipc: + return 11 + else: + return 8 def columnHeader(self, column): - return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column] + 
if self.have_ipc: + return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Insn Cnt", "Cyc Cnt", "IPC", "Branch")[column] + else: + return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column] def columnFont(self, column): - if column != 7: + if self.have_ipc: + br_col = 10 + else: + br_col = 7 + if column != br_col: return None return QFont("Monospace") @@ -2100,10 +2268,10 @@ def GetEventList(db): # Is a table selectable -def IsSelectable(db, table, sql = ""): +def IsSelectable(db, table, sql = "", columns = "*"): query = QSqlQuery(db) try: - QueryExec(query, "SELECT * FROM " + table + " " + sql + " LIMIT 1") + QueryExec(query, "SELECT " + columns + " FROM " + table + " " + sql + " LIMIT 1") except: return False return True @@ -2754,7 +2922,7 @@ class WindowMenu(): action = self.window_menu.addAction(label) action.setCheckable(True) action.setChecked(sub_window == self.mdi_area.activeSubWindow()) - action.triggered.connect(lambda x=nr: self.setActiveSubWindow(x)) + action.triggered.connect(lambda a=None,x=nr: self.setActiveSubWindow(x)) self.window_menu.addAction(action) nr += 1 @@ -2840,6 +3008,12 @@ cd xed sudo ./mfile.py --prefix=/usr/local install sudo ldconfig </pre> +<h3>Instructions per Cycle (IPC)</h3> +If available, IPC information is displayed in columns 'insn_cnt', 'cyc_cnt' and 'IPC'. +<p><b>Intel PT note:</b> The information applies to the blocks of code ending with, and including, that branch. +Due to the granularity of timing information, the number of cycles for some code blocks will not be known. +In that case, 'insn_cnt', 'cyc_cnt' and 'IPC' are zero, but when 'IPC' is displayed it covers the period +since the previous displayed 'IPC'. <h3>Find</h3> Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match. Refer to Python documentation for the regular expression syntax. 
@@ -3114,14 +3288,14 @@ class MainWindow(QMainWindow): event = event.split(":")[0] if event == "branches": label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")" - reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewBranchView(x), self)) + reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewBranchView(x), self)) label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")" - reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewSelectedBranchView(x), self)) + reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewSelectedBranchView(x), self)) def TableMenu(self, tables, menu): table_menu = menu.addMenu("&Tables") for table in tables: - table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda t=table: self.NewTableView(t), self)) + table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda a=None,t=table: self.NewTableView(t), self)) def NewCallGraph(self): CallGraphWindow(self.glb, self) @@ -3361,18 +3535,27 @@ class DBRef(): # Main def Main(): - if (len(sys.argv) < 2): - printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}"); - raise Exception("Too few arguments") - - dbname = sys.argv[1] - if dbname == "--help-only": + usage_str = "exported-sql-viewer.py [--pyside-version-1] <database name>\n" \ + " or: exported-sql-viewer.py --help-only" + ap = argparse.ArgumentParser(usage = usage_str, add_help = False) + ap.add_argument("--pyside-version-1", action='store_true') + ap.add_argument("dbname", nargs="?") + ap.add_argument("--help-only", action='store_true') + args = ap.parse_args() + + if args.help_only: app = QApplication(sys.argv) mainwindow = HelpOnlyWindow() mainwindow.show() err = app.exec_() sys.exit(err) + dbname = args.dbname + if dbname is None: + ap.print_usage() + print("Too few arguments") + sys.exit(1) + is_sqlite3 = False try: f = open(dbname, "rb") |
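After exporting with the patched scripts, the new IPC columns and the power-event views can be inspected directly from the database. A hedged sketch, not part of the patch: the database names pt_example_db and pt_example.db are placeholders, and only views the patch itself creates (samples_view with insn_count, cyc_count and IPC, and power_events_view) are queried:

 $ psql -d pt_example_db -c 'SELECT id, time, cpu, insn_count, cyc_count, ipc FROM samples_view LIMIT 10'
 $ psql -d pt_example_db -c 'SELECT * FROM power_events_view LIMIT 10'
 $ sqlite3 pt_example.db 'SELECT id, time, cpu, insn_count, cyc_count, ipc FROM samples_view LIMIT 10'

Both exporters define IPC as insn_count / cyc_count rounded to two decimal places, reporting 0 when cyc_count is zero. Note that trace_end() drops the ptwrite, cbr, mwait, pwre, exstop and pwrx tables (and their views, including power_events_view) again if they turn out to be empty, so these queries may fail for traces recorded without the corresponding Intel PT packets.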