From d180a079b0a3222b9fa938e4229a32a042ce7c05 Mon Sep 17 00:00:00 2001 From: Michael Hohn Date: Tue, 9 Nov 2021 11:46:14 -0800 Subject: [PATCH] include needed python result files --- .../linux-small/arch/ia64/scripts/unwcheck.py | 65 + .../ni_routing/tools/convert_csv_to_c.py | 496 ++ .../ni_routing/tools/convert_py_to_csv.py | 66 + .../ni_routing/tools/csv_collection.py | 39 + .../ni_routing/tools/make_blank_csv.py | 31 + data/linux-small/scripts/bpf_doc.py | 734 ++ .../scripts/checkkconfigsymbols.py | 471 ++ .../scripts/clang-tools/run-clang-tools.py | 74 + data/linux-small/scripts/gdb/linux/clk.py | 76 + data/linux-small/scripts/gdb/linux/dmesg.py | 154 + data/linux-small/scripts/gdb/linux/genpd.py | 83 + data/linux-small/scripts/gdb/linux/modules.py | 95 + data/linux-small/scripts/gdb/linux/rbtree.py | 177 + data/linux-small/scripts/gdb/vmlinux-gdb.py | 39 + data/linux-small/scripts/show_delta | 128 + .../tools/cgroup/iocost_monitor.py | 270 + .../tools/cgroup/memcg_slabinfo.py | 226 + data/linux-small/tools/hv/vmbus_testing | 376 + data/linux-small/tools/kvm/kvm_stat/kvm_stat | 1819 +++++ .../lib/Perf/Trace/EventClass.py | 97 + .../Perf-Trace-Util/lib/Perf/Trace/Util.py | 91 + .../perf/scripts/python/check-perf-trace.py | 84 + .../perf/scripts/python/compaction-times.py | 311 + .../scripts/python/event_analyzing_sample.py | 192 + .../scripts/python/export-to-postgresql.py | 1111 +++ .../perf/scripts/python/export-to-sqlite.py | 796 ++ .../scripts/python/exported-sql-viewer.py | 5027 ++++++++++++ .../scripts/python/failed-syscalls-by-pid.py | 79 + .../perf/scripts/python/intel-pt-events.py | 355 + .../tools/perf/scripts/python/libxed.py | 107 + .../perf/scripts/python/mem-phys-addr.py | 100 + .../perf/scripts/python/net_dropmonitor.py | 78 + .../perf/scripts/python/sched-migration.py | 462 ++ .../tools/perf/scripts/python/sctop.py | 89 + .../scripts/python/syscall-counts-by-pid.py | 75 + .../tools/power/pm-graph/bootgraph.py | 1101 +++ .../tools/power/pm-graph/sleepgraph.py | 6969 +++++++++++++++++ .../intel_pstate_tracer.py | 615 ++ data/linux-small/tools/rcu/rcu-cbs.py | 46 + 39 files changed, 23204 insertions(+) create mode 100644 data/linux-small/arch/ia64/scripts/unwcheck.py create mode 100755 data/linux-small/drivers/comedi/drivers/ni_routing/tools/convert_csv_to_c.py create mode 100755 data/linux-small/drivers/comedi/drivers/ni_routing/tools/convert_py_to_csv.py create mode 100644 data/linux-small/drivers/comedi/drivers/ni_routing/tools/csv_collection.py create mode 100755 data/linux-small/drivers/comedi/drivers/ni_routing/tools/make_blank_csv.py create mode 100755 data/linux-small/scripts/bpf_doc.py create mode 100755 data/linux-small/scripts/checkkconfigsymbols.py create mode 100755 data/linux-small/scripts/clang-tools/run-clang-tools.py create mode 100644 data/linux-small/scripts/gdb/linux/clk.py create mode 100644 data/linux-small/scripts/gdb/linux/dmesg.py create mode 100644 data/linux-small/scripts/gdb/linux/genpd.py create mode 100644 data/linux-small/scripts/gdb/linux/modules.py create mode 100644 data/linux-small/scripts/gdb/linux/rbtree.py create mode 100644 data/linux-small/scripts/gdb/vmlinux-gdb.py create mode 100755 data/linux-small/scripts/show_delta create mode 100644 data/linux-small/tools/cgroup/iocost_monitor.py create mode 100644 data/linux-small/tools/cgroup/memcg_slabinfo.py create mode 100755 data/linux-small/tools/hv/vmbus_testing create mode 100755 data/linux-small/tools/kvm/kvm_stat/kvm_stat create mode 100755 
data/linux-small/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py create mode 100644 data/linux-small/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py create mode 100644 data/linux-small/tools/perf/scripts/python/check-perf-trace.py create mode 100644 data/linux-small/tools/perf/scripts/python/compaction-times.py create mode 100644 data/linux-small/tools/perf/scripts/python/event_analyzing_sample.py create mode 100644 data/linux-small/tools/perf/scripts/python/export-to-postgresql.py create mode 100644 data/linux-small/tools/perf/scripts/python/export-to-sqlite.py create mode 100755 data/linux-small/tools/perf/scripts/python/exported-sql-viewer.py create mode 100644 data/linux-small/tools/perf/scripts/python/failed-syscalls-by-pid.py create mode 100644 data/linux-small/tools/perf/scripts/python/intel-pt-events.py create mode 100644 data/linux-small/tools/perf/scripts/python/libxed.py create mode 100644 data/linux-small/tools/perf/scripts/python/mem-phys-addr.py create mode 100755 data/linux-small/tools/perf/scripts/python/net_dropmonitor.py create mode 100644 data/linux-small/tools/perf/scripts/python/sched-migration.py create mode 100644 data/linux-small/tools/perf/scripts/python/sctop.py create mode 100644 data/linux-small/tools/perf/scripts/python/syscall-counts-by-pid.py create mode 100755 data/linux-small/tools/power/pm-graph/bootgraph.py create mode 100755 data/linux-small/tools/power/pm-graph/sleepgraph.py create mode 100755 data/linux-small/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py create mode 100644 data/linux-small/tools/rcu/rcu-cbs.py diff --git a/data/linux-small/arch/ia64/scripts/unwcheck.py b/data/linux-small/arch/ia64/scripts/unwcheck.py new file mode 100644 index 0000000..9581742 --- /dev/null +++ b/data/linux-small/arch/ia64/scripts/unwcheck.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# +# Usage: unwcheck.py FILE +# +# This script checks the unwind info of each function in file FILE +# and verifies that the sum of the region-lengths matches the total +# length of the function. +# +# Based on a shell/awk script originally written by Harish Patil, +# which was converted to Perl by Matthew Chapman, which was converted +# to Python by David Mosberger. +# +import os +import re +import sys + +if len(sys.argv) != 2: + print("Usage: %s FILE" % sys.argv[0]) + sys.exit(2) + +readelf = os.getenv("READELF", "readelf") + +start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]") +rlen_pattern = re.compile(".*rlen=([0-9]+)") + +def check_func (func, slots, rlen_sum): + if slots != rlen_sum: + global num_errors + num_errors += 1 + if not func: func = "[%#x-%#x]" % (start, end) + print("ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)) + return + +num_funcs = 0 +num_errors = 0 +func = False +slots = 0 +rlen_sum = 0 +for line in os.popen("%s -u %s" % (readelf, sys.argv[1])): + m = start_pattern.match(line) + if m: + check_func(func, slots, rlen_sum) + + func = m.group(1) + start = int(m.group(2), 16) + end = int(m.group(3), 16) + slots = 3 * (end - start) / 16 + rlen_sum = 0 + num_funcs += 1 + else: + m = rlen_pattern.match(line) + if m: + rlen_sum += int(m.group(1)) +check_func(func, slots, rlen_sum) + +if num_errors == 0: + print("No errors detected in %u functions." % num_funcs) +else: + if num_errors > 1: + err="errors" + else: + err="error" + print("%u %s detected in %u functions." 
% (num_errors, err, num_funcs)) + sys.exit(1) diff --git a/data/linux-small/drivers/comedi/drivers/ni_routing/tools/convert_csv_to_c.py b/data/linux-small/drivers/comedi/drivers/ni_routing/tools/convert_csv_to_c.py new file mode 100755 index 0000000..90378fb --- /dev/null +++ b/data/linux-small/drivers/comedi/drivers/ni_routing/tools/convert_csv_to_c.py @@ -0,0 +1,496 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0+ + +# This is simply to aid in creating the entries in the order of the value of +# the device-global NI signal/terminal constants defined in comedi.h +import comedi_h +import os, sys, re +from csv_collection import CSVCollection + + +def c_to_o(filename, prefix='\t\t\t\t\t ni_routing/', suffix=' \\'): + if not filename.endswith('.c'): + return '' + return prefix + filename.rpartition('.c')[0] + '.o' + suffix + + +def routedict_to_structinit_single(name, D, return_name=False): + Locals = dict() + lines = [ + '\t.family = "{}",'.format(name), + '\t.register_values = {', + '\t\t/*', + '\t\t * destination = {', + '\t\t * source = register value,', + '\t\t * ...', + '\t\t * }', + '\t\t */', + ] + if (False): + # print table with index0:src, index1:dest + D0 = D # (src-> dest->reg_value) + #D1 : destD + else: + D0 = dict() + for src, destD in D.items(): + for dest, val in destD.items(): + D0.setdefault(dest, {})[src] = val + + + D0 = sorted(D0.items(), key=lambda i: eval(i[0], comedi_h.__dict__, Locals)) + + for D0_sig, D1_D in D0: + D1 = sorted(D1_D.items(), key=lambda i: eval(i[0], comedi_h.__dict__, Locals)) + + lines.append('\t\t[B({})] = {{'.format(D0_sig)) + for D1_sig, value in D1: + if not re.match('[VIU]\([^)]*\)', value): + sys.stderr.write('Invalid register format: {}\n'.format(repr(value))) + sys.stderr.write( + 'Register values should be formatted with V(),I(),or U()\n') + raise RuntimeError('Invalid register values format') + lines.append('\t\t\t[B({})]\t= {},'.format(D1_sig, value)) + lines.append('\t\t},') + lines.append('\t},') + + lines = '\n'.join(lines) + if return_name: + return name, lines + else: + return lines + + +def routedict_to_routelist_single(name, D, indent=1): + Locals = dict() + + indents = dict( + I0 = '\t'*(indent), + I1 = '\t'*(indent+1), + I2 = '\t'*(indent+2), + I3 = '\t'*(indent+3), + I4 = '\t'*(indent+4), + ) + + if (False): + # data is src -> dest-list + D0 = D + keyname = 'src' + valname = 'dest' + else: + # data is dest -> src-list + keyname = 'dest' + valname = 'src' + D0 = dict() + for src, destD in D.items(): + for dest, val in destD.items(): + D0.setdefault(dest, {})[src] = val + + # Sort by order of device-global names (numerically) + D0 = sorted(D0.items(), key=lambda i: eval(i[0], comedi_h.__dict__, Locals)) + + lines = [ '{I0}.device = "{name}",\n' + '{I0}.routes = (struct ni_route_set[]){{' + .format(name=name, **indents) ] + for D0_sig, D1_D in D0: + D1 = [ k for k,v in D1_D.items() if v ] + D1.sort(key=lambda i: eval(i, comedi_h.__dict__, Locals)) + + lines.append('{I1}{{\n{I2}.{keyname} = {D0_sig},\n' + '{I2}.{valname} = (int[]){{' + .format(keyname=keyname, valname=valname, D0_sig=D0_sig, **indents) + ) + for D1_sig in D1: + lines.append( '{I3}{D1_sig},'.format(D1_sig=D1_sig, **indents) ) + lines.append( '{I3}0, /* Termination */'.format(**indents) ) + + lines.append('{I2}}}\n{I1}}},'.format(**indents)) + + lines.append('{I1}{{ /* Termination of list */\n{I2}.{keyname} = 0,\n{I1}}},' + .format(keyname=keyname, **indents)) + + lines.append('{I0}}},'.format(**indents)) + + return '\n'.join(lines) + + +class 
DeviceRoutes(CSVCollection): + MKFILE_SEGMENTS = 'device-route.mk' + SET_C = 'ni_device_routes.c' + ITEMS_DIR = 'ni_device_routes' + EXTERN_H = 'all.h' + OUTPUT_DIR = 'c' + + output_file_top = """\ +// SPDX-License-Identifier: GPL-2.0+ +/* + * comedi/drivers/ni_routing/{filename} + * List of valid routes for specific NI boards. + * + * COMEDI - Linux Control and Measurement Device Interface + * Copyright (C) 2016 Spencer E. Olson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * The contents of this file are generated using the tools in + * comedi/drivers/ni_routing/tools + * + * Please use those tools to help maintain the contents of this file. + */ + +#include "ni_device_routes.h" +#include "{extern_h}"\ +""".format(filename=SET_C, extern_h=os.path.join(ITEMS_DIR, EXTERN_H)) + + extern_header = """\ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * comedi/drivers/ni_routing/{filename} + * List of valid routes for specific NI boards. + * + * COMEDI - Linux Control and Measurement Device Interface + * Copyright (C) 2016 Spencer E. Olson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * The contents of this file are generated using the tools in + * comedi/drivers/ni_routing/tools + * + * Please use those tools to help maintain the contents of this file. + */ + +#ifndef _COMEDI_DRIVERS_NI_ROUTING_NI_DEVICE_ROUTES_EXTERN_H +#define _COMEDI_DRIVERS_NI_ROUTING_NI_DEVICE_ROUTES_EXTERN_H + +#include "../ni_device_routes.h" + +{externs} + +#endif //_COMEDI_DRIVERS_NI_ROUTING_NI_DEVICE_ROUTES_EXTERN_H +""" + + single_output_file_top = """\ +// SPDX-License-Identifier: GPL-2.0+ +/* + * comedi/drivers/ni_routing/{filename} + * List of valid routes for specific NI boards. + * + * COMEDI - Linux Control and Measurement Device Interface + * Copyright (C) 2016 Spencer E. Olson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * The contents of this file are generated using the tools in + * comedi/drivers/ni_routing/tools + * + * Please use those tools to help maintain the contents of this file. 
+ */ + +#include "../ni_device_routes.h" +#include "{extern_h}" + +struct ni_device_routes {table_name} = {{\ +""" + + def __init__(self, pattern='csv/device_routes/*.csv'): + super(DeviceRoutes,self).__init__(pattern) + + def to_listinit(self): + chunks = [ self.output_file_top, + '', + 'struct ni_device_routes *const ni_device_routes_list[] = {' + ] + # put the sheets in lexical order of device numbers then bus + sheets = sorted(self.items(), key=lambda i : tuple(i[0].split('-')[::-1]) ) + + externs = [] + objs = [c_to_o(self.SET_C)] + + for sheet,D in sheets: + S = sheet.lower() + dev_table_name = 'ni_{}_device_routes'.format(S.replace('-','_')) + sheet_filename = os.path.join(self.ITEMS_DIR,'{}.c'.format(S)) + externs.append('extern struct ni_device_routes {};'.format(dev_table_name)) + + chunks.append('\t&{},'.format(dev_table_name)) + + s_chunks = [ + self.single_output_file_top.format( + filename = sheet_filename, + table_name = dev_table_name, + extern_h = self.EXTERN_H, + ), + routedict_to_routelist_single(S, D), + '};', + ] + + objs.append(c_to_o(sheet_filename)) + + with open(os.path.join(self.OUTPUT_DIR, sheet_filename), 'w') as f: + f.write('\n'.join(s_chunks)) + f.write('\n') + + with open(os.path.join(self.OUTPUT_DIR, self.MKFILE_SEGMENTS), 'w') as f: + f.write('# This is the segment that should be included in comedi/drivers/Makefile\n') + f.write('ni_routing-objs\t\t\t\t+= \\\n') + f.write('\n'.join(objs)) + f.write('\n') + + EXTERN_H = os.path.join(self.ITEMS_DIR, self.EXTERN_H) + with open(os.path.join(self.OUTPUT_DIR, EXTERN_H), 'w') as f: + f.write(self.extern_header.format( + filename=EXTERN_H, externs='\n'.join(externs))) + + chunks.append('\tNULL,') # terminate list + chunks.append('};') + return '\n'.join(chunks) + + def save(self): + filename=os.path.join(self.OUTPUT_DIR, self.SET_C) + + try: + os.makedirs(os.path.join(self.OUTPUT_DIR, self.ITEMS_DIR)) + except: + pass + with open(filename,'w') as f: + f.write( self.to_listinit() ) + f.write( '\n' ) + + +class RouteValues(CSVCollection): + MKFILE_SEGMENTS = 'route-values.mk' + SET_C = 'ni_route_values.c' + ITEMS_DIR = 'ni_route_values' + EXTERN_H = 'all.h' + OUTPUT_DIR = 'c' + + output_file_top = """\ +// SPDX-License-Identifier: GPL-2.0+ +/* + * comedi/drivers/ni_routing/{filename} + * Route information for NI boards. + * + * COMEDI - Linux Control and Measurement Device Interface + * Copyright (C) 2016 Spencer E. Olson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * This file includes the tables that are a list of all the values of various + * signals routes available on NI hardware. In many cases, one does not + * explicitly make these routes, rather one might indicate that something is + * used as the source of one particular trigger or another (using + * *_src=TRIG_EXT). + * + * The contents of this file are generated using the tools in + * comedi/drivers/ni_routing/tools + * + * Please use those tools to help maintain the contents of this file. 
+ */ + +#include "ni_route_values.h" +#include "{extern_h}"\ +""".format(filename=SET_C, extern_h=os.path.join(ITEMS_DIR, EXTERN_H)) + + extern_header = """\ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * comedi/drivers/ni_routing/{filename} + * List of valid routes for specific NI boards. + * + * COMEDI - Linux Control and Measurement Device Interface + * Copyright (C) 2016 Spencer E. Olson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * The contents of this file are generated using the tools in + * comedi/drivers/ni_routing/tools + * + * Please use those tools to help maintain the contents of this file. + */ + +#ifndef _COMEDI_DRIVERS_NI_ROUTING_NI_ROUTE_VALUES_EXTERN_H +#define _COMEDI_DRIVERS_NI_ROUTING_NI_ROUTE_VALUES_EXTERN_H + +#include "../ni_route_values.h" + +{externs} + +#endif //_COMEDI_DRIVERS_NI_ROUTING_NI_ROUTE_VALUES_EXTERN_H +""" + + single_output_file_top = """\ +// SPDX-License-Identifier: GPL-2.0+ +/* + * comedi/drivers/ni_routing/{filename} + * Route information for {sheet} boards. + * + * COMEDI - Linux Control and Measurement Device Interface + * Copyright (C) 2016 Spencer E. Olson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * This file includes a list of all the values of various signals routes + * available on NI 660x hardware. In many cases, one does not explicitly make + * these routes, rather one might indicate that something is used as the source + * of one particular trigger or another (using *_src=TRIG_EXT). + * + * The contents of this file can be generated using the tools in + * comedi/drivers/ni_routing/tools. This file also contains specific notes to + * this family of devices. + * + * Please use those tools to help maintain the contents of this file, but be + * mindful to not lose the notes already made in this file, since these notes + * are critical to a complete undertsanding of the register values of this + * family. 
+ */ + +#include "../ni_route_values.h" +#include "{extern_h}" + +const struct family_route_values {table_name} = {{\ +""" + + def __init__(self, pattern='csv/route_values/*.csv'): + super(RouteValues,self).__init__(pattern) + + def to_structinit(self): + chunks = [ self.output_file_top, + '', + 'const struct family_route_values *const ni_all_route_values[] = {' + ] + # put the sheets in lexical order for consistency + sheets = sorted(self.items(), key=lambda i : i[0] ) + + externs = [] + objs = [c_to_o(self.SET_C)] + + for sheet,D in sheets: + S = sheet.lower() + fam_table_name = '{}_route_values'.format(S.replace('-','_')) + sheet_filename = os.path.join(self.ITEMS_DIR,'{}.c'.format(S)) + externs.append('extern const struct family_route_values {};'.format(fam_table_name)) + + chunks.append('\t&{},'.format(fam_table_name)) + + s_chunks = [ + self.single_output_file_top.format( + filename = sheet_filename, + sheet = sheet.upper(), + table_name = fam_table_name, + extern_h = self.EXTERN_H, + ), + routedict_to_structinit_single(S, D), + '};', + ] + + objs.append(c_to_o(sheet_filename)) + + with open(os.path.join(self.OUTPUT_DIR, sheet_filename), 'w') as f: + f.write('\n'.join(s_chunks)) + f.write( '\n' ) + + with open(os.path.join(self.OUTPUT_DIR, self.MKFILE_SEGMENTS), 'w') as f: + f.write('# This is the segment that should be included in comedi/drivers/Makefile\n') + f.write('ni_routing-objs\t\t\t\t+= \\\n') + f.write('\n'.join(objs)) + f.write('\n') + + EXTERN_H = os.path.join(self.ITEMS_DIR, self.EXTERN_H) + with open(os.path.join(self.OUTPUT_DIR, EXTERN_H), 'w') as f: + f.write(self.extern_header.format( + filename=EXTERN_H, externs='\n'.join(externs))) + + chunks.append('\tNULL,') # terminate list + chunks.append('};') + return '\n'.join(chunks) + + def save(self): + filename=os.path.join(self.OUTPUT_DIR, self.SET_C) + + try: + os.makedirs(os.path.join(self.OUTPUT_DIR, self.ITEMS_DIR)) + except: + pass + with open(filename,'w') as f: + f.write( self.to_structinit() ) + f.write( '\n' ) + + + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + parser.add_argument( '--route_values', action='store_true', + help='Extract route values from csv/route_values/*.csv' ) + parser.add_argument( '--device_routes', action='store_true', + help='Extract route values from csv/device_routes/*.csv' ) + args = parser.parse_args() + KL = list() + if args.route_values: + KL.append( RouteValues ) + if args.device_routes: + KL.append( DeviceRoutes ) + if not KL: + parser.error('nothing to do...') + for K in KL: + doc = K() + doc.save() diff --git a/data/linux-small/drivers/comedi/drivers/ni_routing/tools/convert_py_to_csv.py b/data/linux-small/drivers/comedi/drivers/ni_routing/tools/convert_py_to_csv.py new file mode 100755 index 0000000..a273b33 --- /dev/null +++ b/data/linux-small/drivers/comedi/drivers/ni_routing/tools/convert_py_to_csv.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0+ + +from os import path +import os, csv +from itertools import chain + +from csv_collection import CSVCollection +from ni_names import value_to_name +import ni_values + +CSV_DIR = 'csv' + +def iter_src_values(D): + return D.items() + +def iter_src(D): + for dest in D: + yield dest, 1 + +def create_csv(name, D, src_iter): + # have to change dest->{src:val} to src->{dest:val} + fieldnames = [value_to_name[i] for i in sorted(D.keys())] + fieldnames.insert(0, CSVCollection.source_column_name) + + S = dict() + for dest, srcD in D.items(): + for src,val in 
src_iter(srcD): + S.setdefault(src,{})[dest] = val + + S = sorted(S.items(), key = lambda src_destD : src_destD[0]) + + + csv_fname = path.join(CSV_DIR, name + '.csv') + with open(csv_fname, 'w') as F_csv: + dR = csv.DictWriter(F_csv, fieldnames, delimiter=';', quotechar='"') + dR.writeheader() + + # now change the json back into the csv dictionaries + rows = [ + dict(chain( + ((CSVCollection.source_column_name,value_to_name[src]),), + *(((value_to_name[dest],v),) for dest,v in destD.items()) + )) + for src, destD in S + ] + + dR.writerows(rows) + + +def to_csv(): + for d in ['route_values', 'device_routes']: + try: + os.makedirs(path.join(CSV_DIR,d)) + except: + pass + + for family, dst_src_map in ni_values.ni_route_values.items(): + create_csv(path.join('route_values',family), dst_src_map, iter_src_values) + + for device, dst_src_map in ni_values.ni_device_routes.items(): + create_csv(path.join('device_routes',device), dst_src_map, iter_src) + + +if __name__ == '__main__': + to_csv() diff --git a/data/linux-small/drivers/comedi/drivers/ni_routing/tools/csv_collection.py b/data/linux-small/drivers/comedi/drivers/ni_routing/tools/csv_collection.py new file mode 100644 index 0000000..db977ec --- /dev/null +++ b/data/linux-small/drivers/comedi/drivers/ni_routing/tools/csv_collection.py @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: GPL-2.0+ + +import os, csv, glob + +class CSVCollection(dict): + delimiter=';' + quotechar='"' + source_column_name = 'Sources / Destinations' + + """ + This class is a dictionary representation of the collection of sheets that + exist in a given .ODS file. + """ + def __init__(self, pattern, skip_commented_lines=True, strip_lines=True): + super(CSVCollection, self).__init__() + self.pattern = pattern + C = '#' if skip_commented_lines else 'blahblahblah' + + if strip_lines: + strip = lambda s:s.strip() + else: + strip = lambda s:s + + # load all CSV files + key = self.source_column_name + for fname in glob.glob(pattern): + with open(fname) as F: + dR = csv.DictReader(F, delimiter=self.delimiter, + quotechar=self.quotechar) + name = os.path.basename(fname).partition('.')[0] + D = { + r[key]:{f:strip(c) for f,c in r.items() + if f != key and f[:1] not in ['', C] and + strip(c)[:1] not in ['', C]} + for r in dR if r[key][:1] not in ['', C] + } + # now, go back through and eliminate all empty dictionaries + D = {k:v for k,v in D.items() if v} + self[name] = D diff --git a/data/linux-small/drivers/comedi/drivers/ni_routing/tools/make_blank_csv.py b/data/linux-small/drivers/comedi/drivers/ni_routing/tools/make_blank_csv.py new file mode 100755 index 0000000..c00eaf8 --- /dev/null +++ b/data/linux-small/drivers/comedi/drivers/ni_routing/tools/make_blank_csv.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0+ + +from os import path +import os, csv + +from csv_collection import CSVCollection +from ni_names import value_to_name + +CSV_DIR = 'csv' + +def to_csv(): + try: + os.makedirs(CSV_DIR) + except: + pass + + csv_fname = path.join(CSV_DIR, 'blank_route_table.csv') + + fieldnames = [sig for sig_val, sig in sorted(value_to_name.items())] + fieldnames.insert(0, CSVCollection.source_column_name) + + with open(csv_fname, 'w') as F_csv: + dR = csv.DictWriter(F_csv, fieldnames, delimiter=';', quotechar='"') + dR.writeheader() + + for sig in fieldnames[1:]: + dR.writerow({CSVCollection.source_column_name: sig}) + +if __name__ == '__main__': + to_csv() diff --git a/data/linux-small/scripts/bpf_doc.py b/data/linux-small/scripts/bpf_doc.py new file mode 
100755 index 0000000..00ac7b7 --- /dev/null +++ b/data/linux-small/scripts/bpf_doc.py @@ -0,0 +1,734 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2018-2019 Netronome Systems, Inc. +# Copyright (C) 2021 Isovalent, Inc. + +# In case user attempts to run with Python 2. +from __future__ import print_function + +import argparse +import re +import sys, os + +class NoHelperFound(BaseException): + pass + +class NoSyscallCommandFound(BaseException): + pass + +class ParsingError(BaseException): + def __init__(self, line='', reader=None): + if reader: + BaseException.__init__(self, + 'Error at file offset %d, parsing line: %s' % + (reader.tell(), line)) + else: + BaseException.__init__(self, 'Error parsing line: %s' % line) + + +class APIElement(object): + """ + An object representing the description of an aspect of the eBPF API. + @proto: prototype of the API symbol + @desc: textual description of the symbol + @ret: (optional) description of any associated return value + """ + def __init__(self, proto='', desc='', ret=''): + self.proto = proto + self.desc = desc + self.ret = ret + + +class Helper(APIElement): + """ + An object representing the description of an eBPF helper function. + @proto: function prototype of the helper function + @desc: textual description of the helper function + @ret: description of the return value of the helper function + """ + def proto_break_down(self): + """ + Break down the helper function prototype into smaller chunks: return + type, name, and distinct arguments. + """ + arg_re = re.compile('((\w+ )*?(\w+|...))( (\**)(\w+))?$') + res = {} + proto_re = re.compile('(.+) (\**)(\w+)\(((([^,]+)(, )?){1,5})\)$') + + capture = proto_re.match(self.proto) + res['ret_type'] = capture.group(1) + res['ret_star'] = capture.group(2) + res['name'] = capture.group(3) + res['args'] = [] + + args = capture.group(4).split(', ') + for a in args: + capture = arg_re.match(a) + res['args'].append({ + 'type' : capture.group(1), + 'star' : capture.group(5), + 'name' : capture.group(6) + }) + + return res + + +class HeaderParser(object): + """ + An object used to parse a file in order to extract the documentation of a + list of eBPF helper functions. All the helpers that can be retrieved are + stored as Helper objects in the self.helpers list. + @filename: name of file to parse, usually include/uapi/linux/bpf.h in the + kernel tree + """ + def __init__(self, filename): + self.reader = open(filename, 'r') + self.line = '' + self.helpers = [] + self.commands = [] + + def parse_element(self): + proto = self.parse_symbol() + desc = self.parse_desc() + ret = self.parse_ret() + return APIElement(proto=proto, desc=desc, ret=ret) + + def parse_helper(self): + proto = self.parse_proto() + desc = self.parse_desc() + ret = self.parse_ret() + return Helper(proto=proto, desc=desc, ret=ret) + + def parse_symbol(self): + p = re.compile(' \* ?(.+)$') + capture = p.match(self.line) + if not capture: + raise NoSyscallCommandFound + end_re = re.compile(' \* ?NOTES$') + end = end_re.match(self.line) + if end: + raise NoSyscallCommandFound + self.line = self.reader.readline() + return capture.group(1) + + def parse_proto(self): + # Argument can be of shape: + # - "void" + # - "type name" + # - "type *name" + # - Same as above, with "const" and/or "struct" in front of type + # - "..." (undefined number of arguments, for bpf_trace_printk()) + # There is at least one term ("void"), and at most five arguments. 
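+    #   For example, the bpf.h prototype
+    #       "void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)"
+    #   has return type "void" with one '*', a helper name, and two
+    #   "type *name" arguments, so it satisfies the pattern below
+    #   (an illustrative case, added here for clarity).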
+ p = re.compile(' \* ?((.+) \**\w+\((((const )?(struct )?(\w+|\.\.\.)( \**\w+)?)(, )?){1,5}\))$') + capture = p.match(self.line) + if not capture: + raise NoHelperFound + self.line = self.reader.readline() + return capture.group(1) + + def parse_desc(self): + p = re.compile(' \* ?(?:\t| {5,8})Description$') + capture = p.match(self.line) + if not capture: + # Helper can have empty description and we might be parsing another + # attribute: return but do not consume. + return '' + # Description can be several lines, some of them possibly empty, and it + # stops when another subsection title is met. + desc = '' + while True: + self.line = self.reader.readline() + if self.line == ' *\n': + desc += '\n' + else: + p = re.compile(' \* ?(?:\t| {5,8})(?:\t| {8})(.*)') + capture = p.match(self.line) + if capture: + desc += capture.group(1) + '\n' + else: + break + return desc + + def parse_ret(self): + p = re.compile(' \* ?(?:\t| {5,8})Return$') + capture = p.match(self.line) + if not capture: + # Helper can have empty retval and we might be parsing another + # attribute: return but do not consume. + return '' + # Return value description can be several lines, some of them possibly + # empty, and it stops when another subsection title is met. + ret = '' + while True: + self.line = self.reader.readline() + if self.line == ' *\n': + ret += '\n' + else: + p = re.compile(' \* ?(?:\t| {5,8})(?:\t| {8})(.*)') + capture = p.match(self.line) + if capture: + ret += capture.group(1) + '\n' + else: + break + return ret + + def seek_to(self, target, help_message): + self.reader.seek(0) + offset = self.reader.read().find(target) + if offset == -1: + raise Exception(help_message) + self.reader.seek(offset) + self.reader.readline() + self.reader.readline() + self.line = self.reader.readline() + + def parse_syscall(self): + self.seek_to('* DOC: eBPF Syscall Commands', + 'Could not find start of eBPF syscall descriptions list') + while True: + try: + command = self.parse_element() + self.commands.append(command) + except NoSyscallCommandFound: + break + + def parse_helpers(self): + self.seek_to('* Start of BPF helper function descriptions:', + 'Could not find start of eBPF helper descriptions list') + while True: + try: + helper = self.parse_helper() + self.helpers.append(helper) + except NoHelperFound: + break + + def run(self): + self.parse_syscall() + self.parse_helpers() + self.reader.close() + +############################################################################### + +class Printer(object): + """ + A generic class for printers. Printers should be created with an array of + Helper objects, and implement a way to print them in the desired fashion. + @parser: A HeaderParser with objects to print to standard output + """ + def __init__(self, parser): + self.parser = parser + self.elements = [] + + def print_header(self): + pass + + def print_footer(self): + pass + + def print_one(self, helper): + pass + + def print_all(self): + self.print_header() + for elem in self.elements: + self.print_one(elem) + self.print_footer() + + +class PrinterRST(Printer): + """ + A generic class for printers that print ReStructured Text. Printers should + be created with a HeaderParser object, and implement a way to print API + elements in the desired fashion. + @parser: A HeaderParser with objects to print to standard output + """ + def __init__(self, parser): + self.parser = parser + + def print_license(self): + license = '''\ +.. Copyright (C) All BPF authors and contributors from 2014 to present. +.. 
See git log include/uapi/linux/bpf.h in kernel tree for details. +.. +.. %%%LICENSE_START(VERBATIM) +.. Permission is granted to make and distribute verbatim copies of this +.. manual provided the copyright notice and this permission notice are +.. preserved on all copies. +.. +.. Permission is granted to copy and distribute modified versions of this +.. manual under the conditions for verbatim copying, provided that the +.. entire resulting derived work is distributed under the terms of a +.. permission notice identical to this one. +.. +.. Since the Linux kernel and libraries are constantly changing, this +.. manual page may be incorrect or out-of-date. The author(s) assume no +.. responsibility for errors or omissions, or for damages resulting from +.. the use of the information contained herein. The author(s) may not +.. have taken the same level of care in the production of this manual, +.. which is licensed free of charge, as they might when working +.. professionally. +.. +.. Formatted or processed versions of this manual, if unaccompanied by +.. the source, must acknowledge the copyright and authors of this work. +.. %%%LICENSE_END +.. +.. Please do not edit this file. It was generated from the documentation +.. located in file include/uapi/linux/bpf.h of the Linux kernel sources +.. (helpers description), and from scripts/bpf_doc.py in the same +.. repository (header and footer). +''' + print(license) + + def print_elem(self, elem): + if (elem.desc): + print('\tDescription') + # Do not strip all newline characters: formatted code at the end of + # a section must be followed by a blank line. + for line in re.sub('\n$', '', elem.desc, count=1).split('\n'): + print('{}{}'.format('\t\t' if line else '', line)) + + if (elem.ret): + print('\tReturn') + for line in elem.ret.rstrip().split('\n'): + print('{}{}'.format('\t\t' if line else '', line)) + + print('') + + +class PrinterHelpersRST(PrinterRST): + """ + A printer for dumping collected information about helpers as a ReStructured + Text page compatible with the rst2man program, which can be used to + generate a manual page for the helpers. + @parser: A HeaderParser with Helper objects to print to standard output + """ + def __init__(self, parser): + self.elements = parser.helpers + + def print_header(self): + header = '''\ +=========== +BPF-HELPERS +=========== +------------------------------------------------------------------------------- +list of eBPF helper functions +------------------------------------------------------------------------------- + +:Manual section: 7 + +DESCRIPTION +=========== + +The extended Berkeley Packet Filter (eBPF) subsystem consists of programs +written in a pseudo-assembly language, then attached to one of several +kernel hooks and run in reaction to specific events. This framework differs +from the older, "classic" BPF (or "cBPF") in several aspects, one of them being +the ability to call special functions (or "helpers") from within a program. +These functions are restricted to a white-list of helpers defined in the +kernel. + +These helpers are used by eBPF programs to interact with the system, or with +the context in which they work. For instance, they can be used to print +debugging messages, to get the time since the system was booted, to interact +with eBPF maps, or to manipulate network packets. Since there are several eBPF +program types, and they do not run in the same context, each program type +can only call a subset of those helpers. 
+ +Due to eBPF conventions, a helper cannot have more than five arguments. + +Internally, eBPF programs call directly into the compiled helper functions +without requiring any foreign-function interface. As a result, calling helpers +introduces no overhead, thus offering excellent performance. + +This document is an attempt to list and document the helpers available to eBPF +developers. They are sorted in chronological order (the oldest helpers in the +kernel at the top). + +HELPERS +======= +''' + PrinterRST.print_license(self) + print(header) + + def print_footer(self): + footer = ''' +EXAMPLES +======== + +Example usage for most of the eBPF helpers listed in this manual page is +available within the Linux kernel sources, at the following locations: + +* *samples/bpf/* +* *tools/testing/selftests/bpf/* + +LICENSE +======= + +eBPF programs can have an associated license, passed along with the bytecode +instructions to the kernel when the programs are loaded. The format for that +string is identical to the one in use for kernel modules (Dual licenses, such +as "Dual BSD/GPL", may be used). Some helper functions are only accessible to +programs that are compatible with the GNU General Public License (GPL). + +In order to use such helpers, the eBPF program must be loaded with the correct +license string passed (via **attr**) to the **bpf**\ () system call, and this +generally translates into the C source code of the program containing a line +similar to the following: + +:: + + char ____license[] __attribute__((section("license"), used)) = "GPL"; + +IMPLEMENTATION +============== + +This manual page is an effort to document the existing eBPF helper functions. +But as of this writing, the BPF sub-system is under heavy development. New eBPF +program or map types are added, along with new helper functions. Some helpers +are occasionally made available for additional program types. So in spite of +the efforts of the community, this page might not be up-to-date. If you want to +check by yourself what helper functions exist in your kernel, or what types of +programs they can support, here are some files among the kernel tree that you +may be interested in: + +* *include/uapi/linux/bpf.h* is the main BPF header. It contains the full list + of all helper functions, as well as many other BPF definitions including most + of the flags, structs or constants used by the helpers. +* *net/core/filter.c* contains the definition of most network-related helper + functions, and the list of program types from which they can be used. +* *kernel/trace/bpf_trace.c* is the equivalent for most tracing program-related + helpers. +* *kernel/bpf/verifier.c* contains the functions used to check that valid types + of eBPF maps are used with a given helper function. +* *kernel/bpf/* directory contains other files in which additional helpers are + defined (for cgroups, sockmaps, etc.). +* The bpftool utility can be used to probe the availability of helper functions + on the system (as well as supported program and map types, and a number of + other parameters). To do so, run **bpftool feature probe** (see + **bpftool-feature**\ (8) for details). Add the **unprivileged** keyword to + list features available to unprivileged users. + +Compatibility between helper functions and program types can generally be found +in the files where helper functions are defined. Look for the **struct +bpf_func_proto** objects and for functions returning them: these functions +contain a list of helpers that a given program type can call. 
Note that the +**default:** label of the **switch ... case** used to filter helpers can call +other functions, themselves allowing access to additional helpers. The +requirement for GPL license is also in those **struct bpf_func_proto**. + +Compatibility between helper functions and map types can be found in the +**check_map_func_compatibility**\ () function in file *kernel/bpf/verifier.c*. + +Helper functions that invalidate the checks on **data** and **data_end** +pointers for network processing are listed in function +**bpf_helper_changes_pkt_data**\ () in file *net/core/filter.c*. + +SEE ALSO +======== + +**bpf**\ (2), +**bpftool**\ (8), +**cgroups**\ (7), +**ip**\ (8), +**perf_event_open**\ (2), +**sendmsg**\ (2), +**socket**\ (7), +**tc-bpf**\ (8)''' + print(footer) + + def print_proto(self, helper): + """ + Format function protocol with bold and italics markers. This makes RST + file less readable, but gives nice results in the manual page. + """ + proto = helper.proto_break_down() + + print('**%s %s%s(' % (proto['ret_type'], + proto['ret_star'].replace('*', '\\*'), + proto['name']), + end='') + + comma = '' + for a in proto['args']: + one_arg = '{}{}'.format(comma, a['type']) + if a['name']: + if a['star']: + one_arg += ' {}**\ '.format(a['star'].replace('*', '\\*')) + else: + one_arg += '** ' + one_arg += '*{}*\\ **'.format(a['name']) + comma = ', ' + print(one_arg, end='') + + print(')**') + + def print_one(self, helper): + self.print_proto(helper) + self.print_elem(helper) + + +class PrinterSyscallRST(PrinterRST): + """ + A printer for dumping collected information about the syscall API as a + ReStructured Text page compatible with the rst2man program, which can be + used to generate a manual page for the syscall. + @parser: A HeaderParser with APIElement objects to print to standard + output + """ + def __init__(self, parser): + self.elements = parser.commands + + def print_header(self): + header = '''\ +=== +bpf +=== +------------------------------------------------------------------------------- +Perform a command on an extended BPF object +------------------------------------------------------------------------------- + +:Manual section: 2 + +COMMANDS +======== +''' + PrinterRST.print_license(self) + print(header) + + def print_one(self, command): + print('**%s**' % (command.proto)) + self.print_elem(command) + + +class PrinterHelpers(Printer): + """ + A printer for dumping collected information about helpers as C header to + be included from BPF program. 
+ @parser: A HeaderParser with Helper objects to print to standard output + """ + def __init__(self, parser): + self.elements = parser.helpers + + type_fwds = [ + 'struct bpf_fib_lookup', + 'struct bpf_sk_lookup', + 'struct bpf_perf_event_data', + 'struct bpf_perf_event_value', + 'struct bpf_pidns_info', + 'struct bpf_redir_neigh', + 'struct bpf_sock', + 'struct bpf_sock_addr', + 'struct bpf_sock_ops', + 'struct bpf_sock_tuple', + 'struct bpf_spin_lock', + 'struct bpf_sysctl', + 'struct bpf_tcp_sock', + 'struct bpf_tunnel_key', + 'struct bpf_xfrm_state', + 'struct linux_binprm', + 'struct pt_regs', + 'struct sk_reuseport_md', + 'struct sockaddr', + 'struct tcphdr', + 'struct seq_file', + 'struct tcp6_sock', + 'struct tcp_sock', + 'struct tcp_timewait_sock', + 'struct tcp_request_sock', + 'struct udp6_sock', + 'struct task_struct', + + 'struct __sk_buff', + 'struct sk_msg_md', + 'struct xdp_md', + 'struct path', + 'struct btf_ptr', + 'struct inode', + 'struct socket', + 'struct file', + 'struct bpf_timer', + ] + known_types = { + '...', + 'void', + 'const void', + 'char', + 'const char', + 'int', + 'long', + 'unsigned long', + + '__be16', + '__be32', + '__wsum', + + 'struct bpf_fib_lookup', + 'struct bpf_perf_event_data', + 'struct bpf_perf_event_value', + 'struct bpf_pidns_info', + 'struct bpf_redir_neigh', + 'struct bpf_sk_lookup', + 'struct bpf_sock', + 'struct bpf_sock_addr', + 'struct bpf_sock_ops', + 'struct bpf_sock_tuple', + 'struct bpf_spin_lock', + 'struct bpf_sysctl', + 'struct bpf_tcp_sock', + 'struct bpf_tunnel_key', + 'struct bpf_xfrm_state', + 'struct linux_binprm', + 'struct pt_regs', + 'struct sk_reuseport_md', + 'struct sockaddr', + 'struct tcphdr', + 'struct seq_file', + 'struct tcp6_sock', + 'struct tcp_sock', + 'struct tcp_timewait_sock', + 'struct tcp_request_sock', + 'struct udp6_sock', + 'struct task_struct', + 'struct path', + 'struct btf_ptr', + 'struct inode', + 'struct socket', + 'struct file', + 'struct bpf_timer', + } + mapped_types = { + 'u8': '__u8', + 'u16': '__u16', + 'u32': '__u32', + 'u64': '__u64', + 's8': '__s8', + 's16': '__s16', + 's32': '__s32', + 's64': '__s64', + 'size_t': 'unsigned long', + 'struct bpf_map': 'void', + 'struct sk_buff': 'struct __sk_buff', + 'const struct sk_buff': 'const struct __sk_buff', + 'struct sk_msg_buff': 'struct sk_msg_md', + 'struct xdp_buff': 'struct xdp_md', + } + # Helpers overloaded for different context types. + overloaded_helpers = [ + 'bpf_get_socket_cookie', + 'bpf_sk_assign', + ] + + def print_header(self): + header = '''\ +/* This is auto-generated file. See bpf_doc.py for details. */ + +/* Forward declarations of BPF structs */''' + + print(header) + for fwd in self.type_fwds: + print('%s;' % fwd) + print('') + + def print_footer(self): + footer = '' + print(footer) + + def map_type(self, t): + if t in self.known_types: + return t + if t in self.mapped_types: + return self.mapped_types[t] + print("Unrecognized type '%s', please add it to known types!" % t, + file=sys.stderr) + sys.exit(1) + + seen_helpers = set() + + def print_one(self, helper): + proto = helper.proto_break_down() + + if proto['name'] in self.seen_helpers: + return + self.seen_helpers.add(proto['name']) + + print('/*') + print(" * %s" % proto['name']) + print(" *") + if (helper.desc): + # Do not strip all newline characters: formatted code at the end of + # a section must be followed by a blank line. 
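+        # (Illustrative sketch of what print_one() ultimately emits: for the
+        #  first helper in bpf.h the generated declaration reads, e.g.,
+        #  static void *(*bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1;
+        #  where the trailing number is the helper's position in seen_helpers.)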
+ for line in re.sub('\n$', '', helper.desc, count=1).split('\n'): + print(' *{}{}'.format(' \t' if line else '', line)) + + if (helper.ret): + print(' *') + print(' * Returns') + for line in helper.ret.rstrip().split('\n'): + print(' *{}{}'.format(' \t' if line else '', line)) + + print(' */') + print('static %s %s(*%s)(' % (self.map_type(proto['ret_type']), + proto['ret_star'], proto['name']), end='') + comma = '' + for i, a in enumerate(proto['args']): + t = a['type'] + n = a['name'] + if proto['name'] in self.overloaded_helpers and i == 0: + t = 'void' + n = 'ctx' + one_arg = '{}{}'.format(comma, self.map_type(t)) + if n: + if a['star']: + one_arg += ' {}'.format(a['star']) + else: + one_arg += ' ' + one_arg += '{}'.format(n) + comma = ', ' + print(one_arg, end='') + + print(') = (void *) %d;' % len(self.seen_helpers)) + print('') + +############################################################################### + +# If script is launched from scripts/ from kernel tree and can access +# ../include/uapi/linux/bpf.h, use it as a default name for the file to parse, +# otherwise the --filename argument will be required from the command line. +script = os.path.abspath(sys.argv[0]) +linuxRoot = os.path.dirname(os.path.dirname(script)) +bpfh = os.path.join(linuxRoot, 'include/uapi/linux/bpf.h') + +printers = { + 'helpers': PrinterHelpersRST, + 'syscall': PrinterSyscallRST, +} + +argParser = argparse.ArgumentParser(description=""" +Parse eBPF header file and generate documentation for the eBPF API. +The RST-formatted output produced can be turned into a manual page with the +rst2man utility. +""") +argParser.add_argument('--header', action='store_true', + help='generate C header file') +if (os.path.isfile(bpfh)): + argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h', + default=bpfh) +else: + argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h') +argParser.add_argument('target', nargs='?', default='helpers', + choices=printers.keys(), help='eBPF API target') +args = argParser.parse_args() + +# Parse file. +headerParser = HeaderParser(args.filename) +headerParser.run() + +# Print formatted output to standard output. +if args.header: + if args.target != 'helpers': + raise NotImplementedError('Only helpers header generation is supported') + printer = PrinterHelpers(headerParser) +else: + printer = printers[args.target](headerParser) +printer.print_all() diff --git a/data/linux-small/scripts/checkkconfigsymbols.py b/data/linux-small/scripts/checkkconfigsymbols.py new file mode 100755 index 0000000..217d21a --- /dev/null +++ b/data/linux-small/scripts/checkkconfigsymbols.py @@ -0,0 +1,471 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-only + +"""Find Kconfig symbols that are referenced but not defined.""" + +# (c) 2014-2017 Valentin Rothberg +# (c) 2014 Stefan Hengelein +# + + +import argparse +import difflib +import os +import re +import signal +import subprocess +import sys +from multiprocessing import Pool, cpu_count + + +# regex expressions +OPERATORS = r"&|\(|\)|\||\!" 
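+# For illustration: with the patterns assembled below, a (hypothetical) Kconfig
+# line such as "depends on PCI && (ACPI || !X86)" is matched by
+# REGEX_KCONFIG_STMT, and PCI, ACPI and X86 are each matched by SYMBOL.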
+SYMBOL = r"(?:\w*[A-Z0-9]\w*){2,}" +DEF = r"^\s*(?:menu){,1}config\s+(" + SYMBOL + r")\s*" +EXPR = r"(?:" + OPERATORS + r"|\s|" + SYMBOL + r")+" +DEFAULT = r"default\s+.*?(?:if\s.+){,1}" +STMT = r"^\s*(?:if|select|imply|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR +SOURCE_SYMBOL = r"(?:\W|\b)+[D]{,1}CONFIG_(" + SYMBOL + r")" + +# regex objects +REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$") +REGEX_SYMBOL = re.compile(r'(?!\B)' + SYMBOL + r'(?!\B)') +REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL) +REGEX_KCONFIG_DEF = re.compile(DEF) +REGEX_KCONFIG_EXPR = re.compile(EXPR) +REGEX_KCONFIG_STMT = re.compile(STMT) +REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$") +REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+") +REGEX_QUOTES = re.compile("(\"(.*?)\")") + + +def parse_options(): + """The user interface of this module.""" + usage = "Run this tool to detect Kconfig symbols that are referenced but " \ + "not defined in Kconfig. If no option is specified, " \ + "checkkconfigsymbols defaults to check your current tree. " \ + "Please note that specifying commits will 'git reset --hard\' " \ + "your current tree! You may save uncommitted changes to avoid " \ + "losing data." + + parser = argparse.ArgumentParser(description=usage) + + parser.add_argument('-c', '--commit', dest='commit', action='store', + default="", + help="check if the specified commit (hash) introduces " + "undefined Kconfig symbols") + + parser.add_argument('-d', '--diff', dest='diff', action='store', + default="", + help="diff undefined symbols between two commits " + "(e.g., -d commmit1..commit2)") + + parser.add_argument('-f', '--find', dest='find', action='store_true', + default=False, + help="find and show commits that may cause symbols to be " + "missing (required to run with --diff)") + + parser.add_argument('-i', '--ignore', dest='ignore', action='store', + default="", + help="ignore files matching this Python regex " + "(e.g., -i '.*defconfig')") + + parser.add_argument('-s', '--sim', dest='sim', action='store', default="", + help="print a list of max. 10 string-similar symbols") + + parser.add_argument('--force', dest='force', action='store_true', + default=False, + help="reset current Git tree even when it's dirty") + + parser.add_argument('--no-color', dest='color', action='store_false', + default=True, + help="don't print colored output (default when not " + "outputting to a terminal)") + + args = parser.parse_args() + + if args.commit and args.diff: + sys.exit("Please specify only one option at once.") + + if args.diff and not re.match(r"^[\w\-\.\^]+\.\.[\w\-\.\^]+$", args.diff): + sys.exit("Please specify valid input in the following format: " + "\'commit1..commit2\'") + + if args.commit or args.diff: + if not args.force and tree_is_dirty(): + sys.exit("The current Git tree is dirty (see 'git status'). " + "Running this script may\ndelete important data since it " + "calls 'git reset --hard' for some performance\nreasons. 
" + " Please run this script in a clean Git tree or pass " + "'--force' if you\nwant to ignore this warning and " + "continue.") + + if args.commit: + if args.commit.startswith('HEAD'): + sys.exit("The --commit option can't use the HEAD ref") + + args.find = False + + if args.ignore: + try: + re.match(args.ignore, "this/is/just/a/test.c") + except: + sys.exit("Please specify a valid Python regex.") + + return args + + +def main(): + """Main function of this module.""" + args = parse_options() + + global COLOR + COLOR = args.color and sys.stdout.isatty() + + if args.sim and not args.commit and not args.diff: + sims = find_sims(args.sim, args.ignore) + if sims: + print("%s: %s" % (yel("Similar symbols"), ', '.join(sims))) + else: + print("%s: no similar symbols found" % yel("Similar symbols")) + sys.exit(0) + + # dictionary of (un)defined symbols + defined = {} + undefined = {} + + if args.commit or args.diff: + head = get_head() + + # get commit range + commit_a = None + commit_b = None + if args.commit: + commit_a = args.commit + "~" + commit_b = args.commit + elif args.diff: + split = args.diff.split("..") + commit_a = split[0] + commit_b = split[1] + undefined_a = {} + undefined_b = {} + + # get undefined items before the commit + reset(commit_a) + undefined_a, _ = check_symbols(args.ignore) + + # get undefined items for the commit + reset(commit_b) + undefined_b, defined = check_symbols(args.ignore) + + # report cases that are present for the commit but not before + for symbol in sorted(undefined_b): + # symbol has not been undefined before + if symbol not in undefined_a: + files = sorted(undefined_b.get(symbol)) + undefined[symbol] = files + # check if there are new files that reference the undefined symbol + else: + files = sorted(undefined_b.get(symbol) - + undefined_a.get(symbol)) + if files: + undefined[symbol] = files + + # reset to head + reset(head) + + # default to check the entire tree + else: + undefined, defined = check_symbols(args.ignore) + + # now print the output + for symbol in sorted(undefined): + print(red(symbol)) + + files = sorted(undefined.get(symbol)) + print("%s: %s" % (yel("Referencing files"), ", ".join(files))) + + sims = find_sims(symbol, args.ignore, defined) + sims_out = yel("Similar symbols") + if sims: + print("%s: %s" % (sims_out, ', '.join(sims))) + else: + print("%s: %s" % (sims_out, "no similar symbols found")) + + if args.find: + print("%s:" % yel("Commits changing symbol")) + commits = find_commits(symbol, args.diff) + if commits: + for commit in commits: + commit = commit.split(" ", 1) + print("\t- %s (\"%s\")" % (yel(commit[0]), commit[1])) + else: + print("\t- no commit found") + print() # new line + + +def reset(commit): + """Reset current git tree to %commit.""" + execute(["git", "reset", "--hard", commit]) + + +def yel(string): + """ + Color %string yellow. + """ + return "\033[33m%s\033[0m" % string if COLOR else string + + +def red(string): + """ + Color %string red. + """ + return "\033[31m%s\033[0m" % string if COLOR else string + + +def execute(cmd): + """Execute %cmd and return stdout. 
Exit in case of error.""" + try: + stdout = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=False) + stdout = stdout.decode(errors='replace') + except subprocess.CalledProcessError as fail: + exit(fail) + return stdout + + +def find_commits(symbol, diff): + """Find commits changing %symbol in the given range of %diff.""" + commits = execute(["git", "log", "--pretty=oneline", + "--abbrev-commit", "-G", + symbol, diff]) + return [x for x in commits.split("\n") if x] + + +def tree_is_dirty(): + """Return true if the current working tree is dirty (i.e., if any file has + been added, deleted, modified, renamed or copied but not committed).""" + stdout = execute(["git", "status", "--porcelain"]) + # iterate over lines of the porcelain output, not over characters + for line in stdout.splitlines(): + if re.findall(r"[URMADC]{1}", line[:2]): + return True + return False + + +def get_head(): + """Return commit hash of current HEAD.""" + stdout = execute(["git", "rev-parse", "HEAD"]) + return stdout.strip('\n') + + +def partition(lst, size): + """Partition list @lst into @size even-sized sublists.""" + return [lst[i::size] for i in range(size)] + + +def init_worker(): + """Set signal handler to ignore SIGINT.""" + signal.signal(signal.SIGINT, signal.SIG_IGN) + + +def find_sims(symbol, ignore, defined=[]): + """Return a list of max. ten Kconfig symbols that are string-similar to + @symbol.""" + if defined: + return difflib.get_close_matches(symbol, set(defined), 10) + + pool = Pool(cpu_count(), init_worker) + kfiles = [] + for gitfile in get_files(): + if REGEX_FILE_KCONFIG.match(gitfile): + kfiles.append(gitfile) + + arglist = [] + for part in partition(kfiles, cpu_count()): + arglist.append((part, ignore)) + + for res in pool.map(parse_kconfig_files, arglist): + defined.extend(res[0]) + + return difflib.get_close_matches(symbol, set(defined), 10) + + +def get_files(): + """Return a list of all files in the current git directory.""" + # use 'git ls-files' to get the worklist + stdout = execute(["git", "ls-files"]) + if len(stdout) > 0 and stdout[-1] == "\n": + stdout = stdout[:-1] + + files = [] + for gitfile in stdout.rsplit("\n"): + if ".git" in gitfile or "ChangeLog" in gitfile or \ + ".log" in gitfile or os.path.isdir(gitfile) or \ + gitfile.startswith("tools/"): + continue + files.append(gitfile) + return files + + +def check_symbols(ignore): + """Find undefined Kconfig symbols and return a dict with the symbol as key + and a list of referencing files as value. Files matching %ignore are not + checked for undefined symbols.""" + pool = Pool(cpu_count(), init_worker) + try: + return check_symbols_helper(pool, ignore) + except KeyboardInterrupt: + pool.terminate() + pool.join() + sys.exit(1) + + +def check_symbols_helper(pool, ignore): + """Helper method for check_symbols(). 
+ + +def check_symbols_helper(pool, ignore): + """Helper method for check_symbols(). Used to catch keyboard interrupts in + check_symbols() in order to properly terminate running worker processes.""" + source_files = [] + kconfig_files = [] + defined_symbols = [] + referenced_symbols = dict() # {file: [symbols]} + + for gitfile in get_files(): + if REGEX_FILE_KCONFIG.match(gitfile): + kconfig_files.append(gitfile) + else: + if ignore and re.match(ignore, gitfile): + continue + # add source files that do not match the ignore pattern + source_files.append(gitfile) + + # parse source files + arglist = partition(source_files, cpu_count()) + for res in pool.map(parse_source_files, arglist): + referenced_symbols.update(res) + + # parse kconfig files + arglist = [] + for part in partition(kconfig_files, cpu_count()): + arglist.append((part, ignore)) + for res in pool.map(parse_kconfig_files, arglist): + defined_symbols.extend(res[0]) + referenced_symbols.update(res[1]) + defined_symbols = set(defined_symbols) + + # inverse mapping of referenced_symbols to dict(symbol: [files]) + inv_map = dict() + for _file, symbols in referenced_symbols.items(): + for symbol in symbols: + inv_map[symbol] = inv_map.get(symbol, set()) + inv_map[symbol].add(_file) + referenced_symbols = inv_map + + undefined = {} # {symbol: [files]} + for symbol in sorted(referenced_symbols): + # filter some false positives + if symbol == "FOO" or symbol == "BAR" or \ + symbol == "FOO_BAR" or symbol == "XXX": + continue + if symbol not in defined_symbols: + if symbol.endswith("_MODULE"): + # avoid false positives for kernel modules + if symbol[:-len("_MODULE")] in defined_symbols: + continue + undefined[symbol] = referenced_symbols.get(symbol) + return undefined, defined_symbols + + +def parse_source_files(source_files): + """Parse each source file in @source_files and return a dictionary with + source files as keys and lists of referenced Kconfig symbols as values.""" + referenced_symbols = dict() + for sfile in source_files: + referenced_symbols[sfile] = parse_source_file(sfile) + return referenced_symbols + + +def parse_source_file(sfile): + """Parse @sfile and return a list of referenced Kconfig symbols.""" + lines = [] + references = [] + + if not os.path.exists(sfile): + return references + + with open(sfile, "r", encoding='utf-8', errors='replace') as stream: + lines = stream.readlines() + + for line in lines: + if "CONFIG_" not in line: + continue + symbols = REGEX_SOURCE_SYMBOL.findall(line) + for symbol in symbols: + if not REGEX_FILTER_SYMBOLS.search(symbol): + continue + references.append(symbol) + + return references + + +def get_symbols_in_line(line): + """Return mentioned Kconfig symbols in @line.""" + return REGEX_SYMBOL.findall(line)
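+ +# For example, the Kconfig statement "depends on USB && GADGET" references the +# symbols USB and GADGET, while a source line containing CONFIG_USB_GADGET is +# recorded as a reference to USB_GADGET (the regexes at the top of this script +# strip the CONFIG_ prefix).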
+ + +def parse_kconfig_files(args): + """Parse kconfig files and return a tuple of defined and referenced Kconfig + symbols. Note, @args is a tuple of a list of files and the @ignore + pattern.""" + kconfig_files = args[0] + ignore = args[1] + defined_symbols = [] + referenced_symbols = dict() + + for kfile in kconfig_files: + defined, references = parse_kconfig_file(kfile) + defined_symbols.extend(defined) + if ignore and re.match(ignore, kfile): + # do not collect references for files that match the ignore pattern + continue + referenced_symbols[kfile] = references + return (defined_symbols, referenced_symbols) + + +def parse_kconfig_file(kfile): + """Parse @kfile and update symbol definitions and references.""" + lines = [] + defined = [] + references = [] + + if not os.path.exists(kfile): + return defined, references + + with open(kfile, "r", encoding='utf-8', errors='replace') as stream: + lines = stream.readlines() + + for i in range(len(lines)): + line = lines[i] + line = line.strip('\n') + line = line.split("#")[0] # ignore comments + + if REGEX_KCONFIG_DEF.match(line): + symbol_def = REGEX_KCONFIG_DEF.findall(line) + defined.append(symbol_def[0]) + elif REGEX_KCONFIG_STMT.match(line): + line = REGEX_QUOTES.sub("", line) + symbols = get_symbols_in_line(line) + # multi-line statements + while line.endswith("\\"): + i += 1 + line = lines[i] + line = line.strip('\n') + symbols.extend(get_symbols_in_line(line)) + for symbol in set(symbols): + if REGEX_NUMERIC.match(symbol): + # ignore numeric values + continue + references.append(symbol) + + return defined, references + + +if __name__ == "__main__": + main() diff --git a/data/linux-small/scripts/clang-tools/run-clang-tools.py b/data/linux-small/scripts/clang-tools/run-clang-tools.py new file mode 100755 index 0000000..f754415 --- /dev/null +++ b/data/linux-small/scripts/clang-tools/run-clang-tools.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (C) Google LLC, 2020 +# +# Author: Nathan Huckleberry +# +"""A helper routine to run clang-tidy and the clang static-analyzer on +compile_commands.json. +""" + +import argparse +import json +import multiprocessing +import os +import subprocess +import sys
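+ + +# Each compile_commands.json entry handed to run_analysis() below is a JSON +# object of the standard compilation-database form (paths here are examples): +# {"directory": "/path/to/build", +# "file": "kernel/fork.c", +# "command": "clang -Wall ... -c kernel/fork.c"}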
+ + +def parse_arguments(): + """Set up and parse command-line arguments. + + Returns: + args: Dict of parsed args + Has keys: [path, type] + """ + usage = """Run clang-tidy or the clang static-analyzer on a + compilation database.""" + parser = argparse.ArgumentParser(description=usage) + + type_help = "Type of analysis to be performed" + parser.add_argument("type", + choices=["clang-tidy", "clang-analyzer"], + help=type_help) + path_help = "Path to the compilation database to parse" + parser.add_argument("path", type=str, help=path_help) + + return parser.parse_args() + + +def init(l, a): + global lock + global args + lock = l + args = a + + +def run_analysis(entry): + # Disable all checks, then re-enable the ones we want + checks = "-checks=-*," + if args.type == "clang-tidy": + checks += "linuxkernel-*" + else: + checks += "clang-analyzer-*" + p = subprocess.run(["clang-tidy", "-p", args.path, checks, entry["file"]], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + cwd=entry["directory"]) + with lock: + sys.stderr.buffer.write(p.stdout) + + +def main(): + args = parse_arguments() + + lock = multiprocessing.Lock() + pool = multiprocessing.Pool(initializer=init, initargs=(lock, args)) + # Read JSON data into the datastore variable + with open(args.path, "r") as f: + datastore = json.load(f) + pool.map(run_analysis, datastore) + + +if __name__ == "__main__": + main() diff --git a/data/linux-small/scripts/gdb/linux/clk.py b/data/linux-small/scripts/gdb/linux/clk.py new file mode 100644 index 0000000..061aecf --- /dev/null +++ b/data/linux-small/scripts/gdb/linux/clk.py @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (c) NXP 2019 + +import gdb +import sys + +from linux import utils, lists, constants + +clk_core_type = utils.CachedType("struct clk_core") + + +def clk_core_for_each_child(hlist_head): + return lists.hlist_for_each_entry(hlist_head, + clk_core_type.get_type().pointer(), "child_node") + + +class LxClkSummary(gdb.Command): + """Print clk tree summary + +Output is a subset of /sys/kernel/debug/clk/clk_summary + +No calls are made during printing; instead a "(c)" is printed after values +which are cached and potentially out of date""" + + def __init__(self): + super(LxClkSummary, self).__init__("lx-clk-summary", gdb.COMMAND_DATA) + + def show_subtree(self, clk, level): + gdb.write("%*s%-*s %7d %8d %8d %11lu%s\n" % ( + level * 3 + 1, "", + 30 - level * 3, + clk['name'].string(), + clk['enable_count'], + clk['prepare_count'], + clk['protect_count'], + clk['rate'], + '(c)' if clk['flags'] & constants.LX_CLK_GET_RATE_NOCACHE else ' ')) + + for child in clk_core_for_each_child(clk['children']): + self.show_subtree(child, level + 1) + + def invoke(self, arg, from_tty): + gdb.write(" enable prepare protect \n") + gdb.write(" clock count count count rate \n") + gdb.write("------------------------------------------------------------------------\n") + for clk in clk_core_for_each_child(gdb.parse_and_eval("clk_root_list")): + self.show_subtree(clk, 0) + for clk in clk_core_for_each_child(gdb.parse_and_eval("clk_orphan_list")): + self.show_subtree(clk, 0) + + +LxClkSummary() + + +class LxClkCoreLookup(gdb.Function): + """Find struct clk_core by name""" + + def __init__(self): + super(LxClkCoreLookup, self).__init__("lx_clk_core_lookup") + + def lookup_hlist(self, hlist_head, name): + for child in clk_core_for_each_child(hlist_head): + if child['name'].string() == name: + return child + result = self.lookup_hlist(child['children'], name) + if result: + return result + + def invoke(self, name): + name = name.string() + return
(self.lookup_hlist(gdb.parse_and_eval("clk_root_list"), name) or + self.lookup_hlist(gdb.parse_and_eval("clk_orphan_list"), name)) + + +LxClkCoreLookup() diff --git a/data/linux-small/scripts/gdb/linux/dmesg.py b/data/linux-small/scripts/gdb/linux/dmesg.py new file mode 100644 index 0000000..a92c55b --- /dev/null +++ b/data/linux-small/scripts/gdb/linux/dmesg.py @@ -0,0 +1,154 @@ +# +# gdb helper commands and functions for Linux kernel debugging +# +# kernel log buffer dump +# +# Copyright (c) Siemens AG, 2011, 2012 +# +# Authors: +# Jan Kiszka +# +# This work is licensed under the terms of the GNU GPL version 2. +# + +import gdb +import sys + +from linux import utils + +printk_info_type = utils.CachedType("struct printk_info") +prb_data_blk_lpos_type = utils.CachedType("struct prb_data_blk_lpos") +prb_desc_type = utils.CachedType("struct prb_desc") +prb_desc_ring_type = utils.CachedType("struct prb_desc_ring") +prb_data_ring_type = utils.CachedType("struct prb_data_ring") +printk_ringbuffer_type = utils.CachedType("struct printk_ringbuffer") +atomic_long_type = utils.CachedType("atomic_long_t") + +class LxDmesg(gdb.Command): + """Print Linux kernel log buffer.""" + + def __init__(self): + super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA) + + def invoke(self, arg, from_tty): + inf = gdb.inferiors()[0] + + # read in prb structure + prb_addr = int(str(gdb.parse_and_eval("(void *)'printk.c'::prb")).split()[0], 16) + sz = printk_ringbuffer_type.get_type().sizeof + prb = utils.read_memoryview(inf, prb_addr, sz).tobytes() + + # read in descriptor ring structure + off = printk_ringbuffer_type.get_type()['desc_ring'].bitpos // 8 + addr = prb_addr + off + sz = prb_desc_ring_type.get_type().sizeof + desc_ring = utils.read_memoryview(inf, addr, sz).tobytes() + + # read in descriptor array + off = prb_desc_ring_type.get_type()['count_bits'].bitpos // 8 + desc_ring_count = 1 << utils.read_u32(desc_ring, off) + desc_sz = prb_desc_type.get_type().sizeof + off = prb_desc_ring_type.get_type()['descs'].bitpos // 8 + addr = utils.read_ulong(desc_ring, off) + descs = utils.read_memoryview(inf, addr, desc_sz * desc_ring_count).tobytes() + + # read in info array + info_sz = printk_info_type.get_type().sizeof + off = prb_desc_ring_type.get_type()['infos'].bitpos // 8 + addr = utils.read_ulong(desc_ring, off) + infos = utils.read_memoryview(inf, addr, info_sz * desc_ring_count).tobytes() + + # read in text data ring structure + off = printk_ringbuffer_type.get_type()['text_data_ring'].bitpos // 8 + addr = prb_addr + off + sz = prb_data_ring_type.get_type().sizeof + text_data_ring = utils.read_memoryview(inf, addr, sz).tobytes() + + # read in text data + off = prb_data_ring_type.get_type()['size_bits'].bitpos // 8 + text_data_sz = 1 << utils.read_u32(text_data_ring, off) + off = prb_data_ring_type.get_type()['data'].bitpos // 8 + addr = utils.read_ulong(text_data_ring, off) + text_data = utils.read_memoryview(inf, addr, text_data_sz).tobytes() + + counter_off = atomic_long_type.get_type()['counter'].bitpos // 8 + + sv_off = prb_desc_type.get_type()['state_var'].bitpos // 8 + + off = prb_desc_type.get_type()['text_blk_lpos'].bitpos // 8 + begin_off = off + (prb_data_blk_lpos_type.get_type()['begin'].bitpos // 8) + next_off = off + (prb_data_blk_lpos_type.get_type()['next'].bitpos // 8) + + ts_off = printk_info_type.get_type()['ts_nsec'].bitpos // 8 + len_off = printk_info_type.get_type()['text_len'].bitpos // 8 + + # definitions from kernel/printk/printk_ringbuffer.h + desc_committed = 1 + desc_finalized 
= 2 + desc_sv_bits = utils.get_long_type().sizeof * 8 + desc_flags_shift = desc_sv_bits - 2 + desc_flags_mask = 3 << desc_flags_shift + desc_id_mask = ~desc_flags_mask + + # read in tail and head descriptor ids + off = prb_desc_ring_type.get_type()['tail_id'].bitpos // 8 + tail_id = utils.read_u64(desc_ring, off + counter_off) + off = prb_desc_ring_type.get_type()['head_id'].bitpos // 8 + head_id = utils.read_u64(desc_ring, off + counter_off) + + did = tail_id + while True: + ind = did % desc_ring_count + desc_off = desc_sz * ind + info_off = info_sz * ind + + # skip non-committed record + state = 3 & (utils.read_u64(descs, desc_off + sv_off + + counter_off) >> desc_flags_shift) + if state != desc_committed and state != desc_finalized: + if did == head_id: + break + did = (did + 1) & desc_id_mask + continue + + begin = utils.read_ulong(descs, desc_off + begin_off) % text_data_sz + end = utils.read_ulong(descs, desc_off + next_off) % text_data_sz + + # handle data-less record + if begin & 1 == 1: + text = "" + else: + # handle wrapping data block + if begin > end: + begin = 0 + + # skip over descriptor id + text_start = begin + utils.get_long_type().sizeof + + text_len = utils.read_u16(infos, info_off + len_off) + + # handle truncated message + if end - text_start < text_len: + text_len = end - text_start + + text = text_data[text_start:text_start + text_len].decode( + encoding='utf8', errors='replace') + + time_stamp = utils.read_u64(infos, info_off + ts_off) + + for line in text.splitlines(): + msg = u"[{time:12.6f}] {line}\n".format( + time=time_stamp / 1000000000.0, + line=line) + # With python2 gdb.write will attempt to convert unicode to + # ascii and might fail so pass an utf8-encoded str instead. + if sys.hexversion < 0x03000000: + msg = msg.encode(encoding='utf8', errors='replace') + gdb.write(msg) + + if did == head_id: + break + did = (did + 1) & desc_id_mask + + +LxDmesg() diff --git a/data/linux-small/scripts/gdb/linux/genpd.py b/data/linux-small/scripts/gdb/linux/genpd.py new file mode 100644 index 0000000..39cd1ab --- /dev/null +++ b/data/linux-small/scripts/gdb/linux/genpd.py @@ -0,0 +1,83 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (c) NXP 2019 + +import gdb +import sys + +from linux.utils import CachedType +from linux.lists import list_for_each_entry + +generic_pm_domain_type = CachedType('struct generic_pm_domain') +pm_domain_data_type = CachedType('struct pm_domain_data') +device_link_type = CachedType('struct device_link') + + +def kobject_get_path(kobj): + path = kobj['name'].string() + parent = kobj['parent'] + if parent: + path = kobject_get_path(parent) + '/' + path + return path + + +def rtpm_status_str(dev): + if dev['power']['runtime_error']: + return 'error' + if dev['power']['disable_depth']: + return 'unsupported' + _RPM_STATUS_LOOKUP = [ + "active", + "resuming", + "suspended", + "suspending" + ] + return _RPM_STATUS_LOOKUP[dev['power']['runtime_status']] + + +class LxGenPDSummary(gdb.Command): + '''Print genpd summary + +Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary''' + + def __init__(self): + super(LxGenPDSummary, self).__init__('lx-genpd-summary', gdb.COMMAND_DATA) + + def summary_one(self, genpd): + if genpd['status'] == 0: + status_string = 'on' + else: + status_string = 'off-{}'.format(genpd['state_idx']) + + child_names = [] + for link in list_for_each_entry( + genpd['parent_links'], + device_link_type.get_type().pointer(), + 'parent_node'): + child_names.append(link['child']['name']) + + gdb.write('%-30s %-15s %s\n' % ( 
+ genpd['name'].string(), + status_string, + ', '.join(child_names))) + + # Print devices in domain + for pm_data in list_for_each_entry(genpd['dev_list'], + pm_domain_data_type.get_type().pointer(), + 'list_node'): + dev = pm_data['dev'] + kobj_path = kobject_get_path(dev['kobj']) + gdb.write(' %-50s %s\n' % (kobj_path, rtpm_status_str(dev))) + + def invoke(self, arg, from_tty): + gdb.write('domain status children\n'); + gdb.write(' /device runtime status\n'); + gdb.write('----------------------------------------------------------------------\n'); + for genpd in list_for_each_entry( + gdb.parse_and_eval('&gpd_list'), + generic_pm_domain_type.get_type().pointer(), + 'gpd_list_node'): + self.summary_one(genpd) + + +LxGenPDSummary() diff --git a/data/linux-small/scripts/gdb/linux/modules.py b/data/linux-small/scripts/gdb/linux/modules.py new file mode 100644 index 0000000..441b232 --- /dev/null +++ b/data/linux-small/scripts/gdb/linux/modules.py @@ -0,0 +1,95 @@ +# +# gdb helper commands and functions for Linux kernel debugging +# +# module tools +# +# Copyright (c) Siemens AG, 2013 +# +# Authors: +# Jan Kiszka +# +# This work is licensed under the terms of the GNU GPL version 2. +# + +import gdb + +from linux import cpus, utils, lists + + +module_type = utils.CachedType("struct module") + + +def module_list(): + global module_type + modules = utils.gdb_eval_or_none("modules") + if modules is None: + return + + module_ptr_type = module_type.get_type().pointer() + + for module in lists.list_for_each_entry(modules, module_ptr_type, "list"): + yield module + + +def find_module_by_name(name): + for module in module_list(): + if module['name'].string() == name: + return module + return None + + +class LxModule(gdb.Function): + """Find module by name and return the module variable. + +$lx_module("MODULE"): Given the name MODULE, iterate over all loaded modules +of the target and return that module variable which MODULE matches.""" + + def __init__(self): + super(LxModule, self).__init__("lx_module") + + def invoke(self, mod_name): + mod_name = mod_name.string() + module = find_module_by_name(mod_name) + if module: + return module.dereference() + else: + raise gdb.GdbError("Unable to find MODULE " + mod_name) + + +LxModule() + + +class LxLsmod(gdb.Command): + """List currently loaded modules.""" + + _module_use_type = utils.CachedType("struct module_use") + + def __init__(self): + super(LxLsmod, self).__init__("lx-lsmod", gdb.COMMAND_DATA) + + def invoke(self, arg, from_tty): + gdb.write( + "Address{0} Module Size Used by\n".format( + " " if utils.get_long_type().sizeof == 8 else "")) + + for module in module_list(): + layout = module['core_layout'] + gdb.write("{address} {name:<19} {size:>8} {ref}".format( + address=str(layout['base']).split()[0], + name=module['name'].string(), + size=str(layout['size']), + ref=str(module['refcnt']['counter'] - 1))) + + t = self._module_use_type.get_type().pointer() + first = True + sources = module['source_list'] + for use in lists.list_for_each_entry(sources, t, "source_list"): + gdb.write("{separator}{name}".format( + separator=" " if first else ",", + name=use['source']['name'].string())) + first = False + + gdb.write("\n") + + +LxLsmod() diff --git a/data/linux-small/scripts/gdb/linux/rbtree.py b/data/linux-small/scripts/gdb/linux/rbtree.py new file mode 100644 index 0000000..fe46285 --- /dev/null +++ b/data/linux-small/scripts/gdb/linux/rbtree.py @@ -0,0 +1,177 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright 2019 Google LLC. 
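+# +# Usage sketch from a gdb session with these helpers loaded (the rb_root +# expression is a placeholder, not a real kernel symbol): +# (gdb) p $lx_rb_first(&my_rb_root) +# (gdb) p $lx_rb_next($)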
+ +import gdb + +from linux import utils + +rb_root_type = utils.CachedType("struct rb_root") +rb_node_type = utils.CachedType("struct rb_node") + + +def rb_first(root): + if root.type == rb_root_type.get_type(): + node = root.address.cast(rb_root_type.get_type().pointer()) + elif root.type != rb_root_type.get_type().pointer(): + raise gdb.GdbError("Must be struct rb_root not {}".format(root.type)) + + node = root['rb_node'] + if node == 0: + return None + + while node['rb_left']: + node = node['rb_left'] + + return node + + +def rb_last(root): + if root.type == rb_root_type.get_type(): + node = root.address.cast(rb_root_type.get_type().pointer()) + elif root.type != rb_root_type.get_type().pointer(): + raise gdb.GdbError("Must be struct rb_root not {}".format(root.type)) + + node = root['rb_node'] + if node == 0: + return None + + while node['rb_right']: + node = node['rb_right'] + + return node + + +def rb_parent(node): + parent = gdb.Value(node['__rb_parent_color'] & ~3) + return parent.cast(rb_node_type.get_type().pointer()) + + +def rb_empty_node(node): + return node['__rb_parent_color'] == node.address + + +def rb_next(node): + if node.type == rb_node_type.get_type(): + node = node.address.cast(rb_node_type.get_type().pointer()) + elif node.type != rb_node_type.get_type().pointer(): + raise gdb.GdbError("Must be struct rb_node not {}".format(node.type)) + + if rb_empty_node(node): + return None + + if node['rb_right']: + node = node['rb_right'] + while node['rb_left']: + node = node['rb_left'] + return node + + parent = rb_parent(node) + while parent and node == parent['rb_right']: + node = parent + parent = rb_parent(node) + + return parent + + +def rb_prev(node): + if node.type == rb_node_type.get_type(): + node = node.address.cast(rb_node_type.get_type().pointer()) + elif node.type != rb_node_type.get_type().pointer(): + raise gdb.GdbError("Must be struct rb_node not {}".format(node.type)) + + if rb_empty_node(node): + return None + + if node['rb_left']: + node = node['rb_left'] + while node['rb_right']: + node = node['rb_right'] + return node.dereference() + + parent = rb_parent(node) + while parent and node == parent['rb_left'].dereference(): + node = parent + parent = rb_parent(node) + + return parent + + +class LxRbFirst(gdb.Function): + """Lookup and return a node from an RBTree. + +$lx_rb_first(root): Return the leftmost (first in sort order) node of the +tree at @root, or raise an error if the tree is empty.""" + + def __init__(self): + super(LxRbFirst, self).__init__("lx_rb_first") + + def invoke(self, root): + result = rb_first(root) + if result is None: + raise gdb.GdbError("No entry in tree") + + return result + + +LxRbFirst() + + +class LxRbLast(gdb.Function): + """Lookup and return a node from an RBTree. + +$lx_rb_last(root): Return the rightmost (last in sort order) node of the +tree at @root, or raise an error if the tree is empty.""" + + def __init__(self): + super(LxRbLast, self).__init__("lx_rb_last") + + def invoke(self, root): + result = rb_last(root) + if result is None: + raise gdb.GdbError("No entry in tree") + + return result + + +LxRbLast() + + +class LxRbNext(gdb.Function): + """Lookup and return a node from an RBTree. + +$lx_rb_next(node): Return the in-order successor of @node, or raise an error +if @node is the last node in the tree.""" + + def __init__(self): + super(LxRbNext, self).__init__("lx_rb_next") + + def invoke(self, node): + result = rb_next(node) + if result is None: + raise gdb.GdbError("No entry in tree") + + return result + + +LxRbNext() + + +class LxRbPrev(gdb.Function): + """Lookup and return a node from an RBTree. + +$lx_rb_prev(node): Return the in-order predecessor of @node, or raise an error +if @node is the first node in the tree.""" + + def __init__(self): + super(LxRbPrev, self).__init__("lx_rb_prev") + + def invoke(self, node): + result = rb_prev(node) + if result is None: + raise gdb.GdbError("No entry in tree") + + return result + + +LxRbPrev() diff --git a/data/linux-small/scripts/gdb/vmlinux-gdb.py b/data/linux-small/scripts/gdb/vmlinux-gdb.py new file mode 100644 index 0000000..4136dc2 --- /dev/null +++ b/data/linux-small/scripts/gdb/vmlinux-gdb.py @@ -0,0 +1,39 @@ +# +# gdb helper commands and functions for Linux kernel debugging +# +# loader module +# +# Copyright (c) Siemens AG, 2012, 2013 +# +# Authors: +# Jan Kiszka +# +# This work is licensed under the terms of the GNU GPL version 2. +# + +import os +import sys + +sys.path.insert(0, os.path.dirname(__file__) + "/scripts/gdb") + +try: + gdb.parse_and_eval("0") + gdb.execute("", to_string=True) +except: + gdb.write("NOTE: gdb 7.2 or later required for Linux helper scripts to " + "work.\n") +else: + import linux.utils + import linux.symbols + import linux.modules + import linux.dmesg + import linux.tasks + import linux.config + import linux.cpus + import linux.lists + import linux.rbtree + import linux.proc + import linux.constants + import linux.timerlist + import linux.clk + import linux.genpd + import linux.device diff --git a/data/linux-small/scripts/show_delta b/data/linux-small/scripts/show_delta new file mode 100755 index 0000000..28e67e1 --- /dev/null +++ b/data/linux-small/scripts/show_delta @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-only +# +# show_delta: Read list of printk messages instrumented with +# time data, and format with time deltas. +# +# Also, you can show the times relative to a fixed point. +# +# Copyright 2003 Sony Corporation +# + +import sys + +def usage(): + print ("""usage: show_delta [<options>] <filename> + +This program parses the output from a set of printk message lines which +have time data prefixed because the CONFIG_PRINTK_TIME option is set, or +the kernel command line option "time" is specified. When run with no +options, the time information is converted to show the time delta between +each printk line and the next. When run with the '-b' option, all times +are relative to a single (base) point in time. + +Options: + -h Show this usage help. + -b <base> Specify a base for time references. + <base> can be a number or a string. + If it is a string, the first message line + which matches (at the beginning of the + line) is used as the time reference. + +ex: $ dmesg >timefile + $ show_delta -b NET4 timefile + +will show times relative to the line in the kernel output +starting with "NET4". +""") + sys.exit(1)
+""") + sys.exit(1) + +# returns a tuple containing the seconds and text for each message line +# seconds is returned as a float +# raise an exception if no timing data was found +def get_time(line): + if line[0]!="[": + raise ValueError + + # split on closing bracket + (time_str, rest) = string.split(line[1:],']',1) + time = string.atof(time_str) + + #print "time=", time + return (time, rest) + + +# average line looks like: +# [ 0.084282] VFS: Mounted root (romfs filesystem) readonly +# time data is expressed in seconds.useconds, +# convert_line adds a delta for each line +last_time = 0.0 +def convert_line(line, base_time): + global last_time + + try: + (time, rest) = get_time(line) + except: + # if any problem parsing time, don't convert anything + return line + + if base_time: + # show time from base + delta = time - base_time + else: + # just show time from last line + delta = time - last_time + last_time = time + + return ("[%5.6f < %5.6f >]" % (time, delta)) + rest + +def main(): + base_str = "" + filein = "" + for arg in sys.argv[1:]: + if arg=="-b": + base_str = sys.argv[sys.argv.index("-b")+1] + elif arg=="-h": + usage() + else: + filein = arg + + if not filein: + usage() + + try: + lines = open(filein,"r").readlines() + except: + print ("Problem opening file: %s" % filein) + sys.exit(1) + + if base_str: + print ('base= "%s"' % base_str) + # assume a numeric base. If that fails, try searching + # for a matching line. + try: + base_time = float(base_str) + except: + # search for line matching string + found = 0 + for line in lines: + try: + (time, rest) = get_time(line) + except: + continue + if string.find(rest, base_str)==1: + base_time = time + found = 1 + # stop at first match + break + if not found: + print ('Couldn\'t find line matching base pattern "%s"' % base_str) + sys.exit(1) + else: + base_time = 0.0 + + for line in lines: + print (convert_line(line, base_time),) + +main() diff --git a/data/linux-small/tools/cgroup/iocost_monitor.py b/data/linux-small/tools/cgroup/iocost_monitor.py new file mode 100644 index 0000000..c4ff907 --- /dev/null +++ b/data/linux-small/tools/cgroup/iocost_monitor.py @@ -0,0 +1,270 @@ +#!/usr/bin/env drgn +# +# Copyright (C) 2019 Tejun Heo +# Copyright (C) 2019 Facebook + +desc = """ +This is a drgn script to monitor the blk-iocost cgroup controller. +See the comment at the top of block/blk-iocost.c for more details. +For drgn, visit https://github.com/osandov/drgn. +""" + +import sys +import re +import time +import json +import math + +import drgn +from drgn import container_of +from drgn.helpers.linux.list import list_for_each_entry,list_empty +from drgn.helpers.linux.radixtree import radix_tree_for_each,radix_tree_lookup + +import argparse +parser = argparse.ArgumentParser(description=desc, + formatter_class=argparse.RawTextHelpFormatter) +parser.add_argument('devname', metavar='DEV', + help='Target block device name (e.g. 
sda)') +parser.add_argument('--cgroup', action='append', metavar='REGEX', + help='Regex for target cgroups, ') +parser.add_argument('--interval', '-i', metavar='SECONDS', type=float, default=1, + help='Monitoring interval in seconds (0 exits immediately ' + 'after checking requirements)') +parser.add_argument('--json', action='store_true', + help='Output in json') +args = parser.parse_args() + +def err(s): + print(s, file=sys.stderr, flush=True) + sys.exit(1) + +try: + blkcg_root = prog['blkcg_root'] + plid = prog['blkcg_policy_iocost'].plid.value_() +except: + err('The kernel does not have iocost enabled') + +IOC_RUNNING = prog['IOC_RUNNING'].value_() +WEIGHT_ONE = prog['WEIGHT_ONE'].value_() +VTIME_PER_SEC = prog['VTIME_PER_SEC'].value_() +VTIME_PER_USEC = prog['VTIME_PER_USEC'].value_() +AUTOP_SSD_FAST = prog['AUTOP_SSD_FAST'].value_() +AUTOP_SSD_DFL = prog['AUTOP_SSD_DFL'].value_() +AUTOP_SSD_QD1 = prog['AUTOP_SSD_QD1'].value_() +AUTOP_HDD = prog['AUTOP_HDD'].value_() + +autop_names = { + AUTOP_SSD_FAST: 'ssd_fast', + AUTOP_SSD_DFL: 'ssd_dfl', + AUTOP_SSD_QD1: 'ssd_qd1', + AUTOP_HDD: 'hdd', +} + +class BlkgIterator: + def blkcg_name(blkcg): + return blkcg.css.cgroup.kn.name.string_().decode('utf-8') + + def walk(self, blkcg, q_id, parent_path): + if not self.include_dying and \ + not (blkcg.css.flags.value_() & prog['CSS_ONLINE'].value_()): + return + + name = BlkgIterator.blkcg_name(blkcg) + path = parent_path + '/' + name if parent_path else name + blkg = drgn.Object(prog, 'struct blkcg_gq', + address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id)) + if not blkg.address_: + return + + self.blkgs.append((path if path else '/', blkg)) + + for c in list_for_each_entry('struct blkcg', + blkcg.css.children.address_of_(), 'css.sibling'): + self.walk(c, q_id, path) + + def __init__(self, root_blkcg, q_id, include_dying=False): + self.include_dying = include_dying + self.blkgs = [] + self.walk(root_blkcg, q_id, '') + + def __iter__(self): + return iter(self.blkgs) + +class IocStat: + def __init__(self, ioc): + global autop_names + + self.enabled = ioc.enabled.value_() + self.running = ioc.running.value_() == IOC_RUNNING + self.period_ms = ioc.period_us.value_() / 1_000 + self.period_at = ioc.period_at.value_() / 1_000_000 + self.vperiod_at = ioc.period_at_vtime.value_() / VTIME_PER_SEC + self.vrate_pct = ioc.vtime_base_rate.value_() * 100 / VTIME_PER_USEC + self.busy_level = ioc.busy_level.value_() + self.autop_idx = ioc.autop_idx.value_() + self.user_cost_model = ioc.user_cost_model.value_() + self.user_qos_params = ioc.user_qos_params.value_() + + if self.autop_idx in autop_names: + self.autop_name = autop_names[self.autop_idx] + else: + self.autop_name = '?' 
+ + def dict(self, now): + return { 'device' : devname, + 'timestamp' : now, + 'enabled' : self.enabled, + 'running' : self.running, + 'period_ms' : self.period_ms, + 'period_at' : self.period_at, + 'period_vtime_at' : self.vperiod_at, + 'busy_level' : self.busy_level, + 'vrate_pct' : self.vrate_pct, } + + def table_preamble_str(self): + state = ('RUN' if self.running else 'IDLE') if self.enabled else 'OFF' + output = f'{devname} {state:4} ' \ + f'per={self.period_ms}ms ' \ + f'cur_per={self.period_at:.3f}:v{self.vperiod_at:.3f} ' \ + f'busy={self.busy_level:+3} ' \ + f'vrate={self.vrate_pct:6.2f}% ' \ + f'params={self.autop_name}' + if self.user_cost_model or self.user_qos_params: + output += f'({"C" if self.user_cost_model else ""}{"Q" if self.user_qos_params else ""})' + return output + + def table_header_str(self): + return f'{"":25} active {"weight":>9} {"hweight%":>13} {"inflt%":>6} ' \ + f'{"debt":>7} {"delay":>7} {"usage%"}' + +class IocgStat: + def __init__(self, iocg): + ioc = iocg.ioc + blkg = iocg.pd.blkg + + self.is_active = not list_empty(iocg.active_list.address_of_()) + self.weight = iocg.weight.value_() / WEIGHT_ONE + self.active = iocg.active.value_() / WEIGHT_ONE + self.inuse = iocg.inuse.value_() / WEIGHT_ONE + self.hwa_pct = iocg.hweight_active.value_() * 100 / WEIGHT_ONE + self.hwi_pct = iocg.hweight_inuse.value_() * 100 / WEIGHT_ONE + self.address = iocg.value_() + + vdone = iocg.done_vtime.counter.value_() + vtime = iocg.vtime.counter.value_() + vrate = ioc.vtime_rate.counter.value_() + period_vtime = ioc.period_us.value_() * vrate + if period_vtime: + self.inflight_pct = (vtime - vdone) * 100 / period_vtime + else: + self.inflight_pct = 0 + + self.usage = (100 * iocg.usage_delta_us.value_() / + ioc.period_us.value_()) if self.active else 0 + self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000 + if blkg.use_delay.counter.value_() != 0: + self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000 + else: + self.delay_ms = 0 + + def dict(self, now, path): + out = { 'cgroup' : path, + 'timestamp' : now, + 'is_active' : self.is_active, + 'weight' : self.weight, + 'weight_active' : self.active, + 'weight_inuse' : self.inuse, + 'hweight_active_pct' : self.hwa_pct, + 'hweight_inuse_pct' : self.hwi_pct, + 'inflight_pct' : self.inflight_pct, + 'debt_ms' : self.debt_ms, + 'delay_ms' : self.delay_ms, + 'usage_pct' : self.usage, + 'address' : self.address } + return out + + def table_row_str(self, path): + out = f'{path[-28:]:28} ' \ + f'{"*" if self.is_active else " "} ' \ + f'{round(self.inuse):5}/{round(self.active):5} ' \ + f'{self.hwi_pct:6.2f}/{self.hwa_pct:6.2f} ' \ + f'{self.inflight_pct:6.2f} ' \ + f'{self.debt_ms:7.2f} ' \ + f'{self.delay_ms:7.2f} '\ + f'{min(self.usage, 999):6.2f}' + out = out.rstrip(':') + return out + +# handle args +table_fmt = not args.json +interval = args.interval +devname = args.devname + +if args.json: + table_fmt = False + +re_str = None +if args.cgroup: + for r in args.cgroup: + if re_str is None: + re_str = r + else: + re_str += '|' + r + +filter_re = re.compile(re_str) if re_str else None + +# Locate the roots +q_id = None +root_iocg = None +ioc = None + +for i, ptr in radix_tree_for_each(blkcg_root.blkg_tree.address_of_()): + blkg = drgn.Object(prog, 'struct blkcg_gq', address=ptr) + try: + if devname == blkg.q.kobj.parent.name.string_().decode('utf-8'): + q_id = blkg.q.id.value_() + if blkg.pd[plid]: + root_iocg = container_of(blkg.pd[plid], 'struct ioc_gq', 'pd') + ioc = root_iocg.ioc + break + except: + pass + +if ioc is 
None: + err(f'Could not find ioc for {devname}'); + +if interval == 0: + sys.exit(0) + +# Keep printing +while True: + now = time.time() + iocstat = IocStat(ioc) + output = '' + + if table_fmt: + output += '\n' + iocstat.table_preamble_str() + output += '\n' + iocstat.table_header_str() + else: + output += json.dumps(iocstat.dict(now)) + + for path, blkg in BlkgIterator(blkcg_root, q_id): + if filter_re and not filter_re.match(path): + continue + if not blkg.pd[plid]: + continue + + iocg = container_of(blkg.pd[plid], 'struct ioc_gq', 'pd') + iocg_stat = IocgStat(iocg) + + if not filter_re and not iocg_stat.is_active: + continue + + if table_fmt: + output += '\n' + iocg_stat.table_row_str(path) + else: + output += '\n' + json.dumps(iocg_stat.dict(now, path)) + + print(output) + sys.stdout.flush() + time.sleep(interval) diff --git a/data/linux-small/tools/cgroup/memcg_slabinfo.py b/data/linux-small/tools/cgroup/memcg_slabinfo.py new file mode 100644 index 0000000..1600b17 --- /dev/null +++ b/data/linux-small/tools/cgroup/memcg_slabinfo.py @@ -0,0 +1,226 @@ +#!/usr/bin/env drgn +# +# Copyright (C) 2020 Roman Gushchin +# Copyright (C) 2020 Facebook + +from os import stat +import argparse +import sys + +from drgn.helpers.linux import list_for_each_entry, list_empty +from drgn.helpers.linux import for_each_page +from drgn.helpers.linux.cpumask import for_each_online_cpu +from drgn.helpers.linux.percpu import per_cpu_ptr +from drgn import container_of, FaultError, Object + + +DESC = """ +This is a drgn script to provide slab statistics for memory cgroups. +It supports cgroup v2 and v1 and can emulate memory.kmem.slabinfo +interface of cgroup v1. +For drgn, visit https://github.com/osandov/drgn. +""" + + +MEMCGS = {} + +OO_SHIFT = 16 +OO_MASK = ((1 << OO_SHIFT) - 1) + + +def err(s): + print('slabinfo.py: error: %s' % s, file=sys.stderr, flush=True) + sys.exit(1) + + +def find_memcg_ids(css=prog['root_mem_cgroup'].css, prefix=''): + if not list_empty(css.children.address_of_()): + for css in list_for_each_entry('struct cgroup_subsys_state', + css.children.address_of_(), + 'sibling'): + name = prefix + '/' + css.cgroup.kn.name.string_().decode('utf-8') + memcg = container_of(css, 'struct mem_cgroup', 'css') + MEMCGS[css.cgroup.kn.id.value_()] = memcg + find_memcg_ids(css, name) + + +def is_root_cache(s): + try: + return False if s.memcg_params.root_cache else True + except AttributeError: + return True + + +def cache_name(s): + if is_root_cache(s): + return s.name.string_().decode('utf-8') + else: + return s.memcg_params.root_cache.name.string_().decode('utf-8') + + +# SLUB + +def oo_order(s): + return s.oo.x >> OO_SHIFT + + +def oo_objects(s): + return s.oo.x & OO_MASK + + +def count_partial(n, fn): + nr_pages = 0 + for page in list_for_each_entry('struct page', n.partial.address_of_(), + 'lru'): + nr_pages += fn(page) + return nr_pages + + +def count_free(page): + return page.objects - page.inuse + + +def slub_get_slabinfo(s, cfg): + nr_slabs = 0 + nr_objs = 0 + nr_free = 0 + + for node in range(cfg['nr_nodes']): + n = s.node[node] + nr_slabs += n.nr_slabs.counter.value_() + nr_objs += n.total_objects.counter.value_() + nr_free += count_partial(n, count_free) + + return {'active_objs': nr_objs - nr_free, + 'num_objs': nr_objs, + 'active_slabs': nr_slabs, + 'num_slabs': nr_slabs, + 'objects_per_slab': oo_objects(s), + 'cache_order': oo_order(s), + 'limit': 0, + 'batchcount': 0, + 'shared': 0, + 'shared_avail': 0} + + +def cache_show(s, cfg, objs): + if cfg['allocator'] == 'SLUB': + sinfo = 
slub_get_slabinfo(s, cfg) + else: + err('SLAB isn\'t supported yet') + + if cfg['shared_slab_pages']: + sinfo['active_objs'] = objs + sinfo['num_objs'] = objs + + print('%-17s %6lu %6lu %6u %4u %4d' + ' : tunables %4u %4u %4u' + ' : slabdata %6lu %6lu %6lu' % ( + cache_name(s), sinfo['active_objs'], sinfo['num_objs'], + s.size, sinfo['objects_per_slab'], 1 << sinfo['cache_order'], + sinfo['limit'], sinfo['batchcount'], sinfo['shared'], + sinfo['active_slabs'], sinfo['num_slabs'], + sinfo['shared_avail'])) + + +def detect_kernel_config(): + cfg = {} + + cfg['nr_nodes'] = prog['nr_online_nodes'].value_() + + if prog.type('struct kmem_cache').members[1].name == 'flags': + cfg['allocator'] = 'SLUB' + elif prog.type('struct kmem_cache').members[1].name == 'batchcount': + cfg['allocator'] = 'SLAB' + else: + err('Can\'t determine the slab allocator') + + cfg['shared_slab_pages'] = False + try: + if prog.type('struct obj_cgroup'): + cfg['shared_slab_pages'] = True + except: + pass + + return cfg + + +def for_each_slab_page(prog): + PGSlab = 1 << prog.constant('PG_slab') + PGHead = 1 << prog.constant('PG_head') + + for page in for_each_page(prog): + try: + if page.flags.value_() & PGSlab: + yield page + except FaultError: + pass + + +def main(): + parser = argparse.ArgumentParser(description=DESC, + formatter_class= + argparse.RawTextHelpFormatter) + parser.add_argument('cgroup', metavar='CGROUP', + help='Target memory cgroup') + args = parser.parse_args() + + try: + cgroup_id = stat(args.cgroup).st_ino + find_memcg_ids() + memcg = MEMCGS[cgroup_id] + except KeyError: + err('Can\'t find the memory cgroup') + + cfg = detect_kernel_config() + + print('# name <active_objs> <num_objs> <objsize> <objperslab>' + ' <pagesperslab> : tunables <limit> <batchcount> <sharedfactor>' + ' : slabdata <active_slabs> <num_slabs> <sharedavail>') + + if cfg['shared_slab_pages']: + obj_cgroups = set() + stats = {} + caches = {} + + # find memcg pointers belonging to the specified cgroup + obj_cgroups.add(memcg.objcg.value_()) + for ptr in list_for_each_entry('struct obj_cgroup', + memcg.objcg_list.address_of_(), + 'list'): + obj_cgroups.add(ptr.value_()) + + # look over all slab pages belonging to non-root memcgs + # and look for objects belonging to the given memory cgroup + for page in for_each_slab_page(prog): + objcg_vec_raw = page.memcg_data.value_() + if objcg_vec_raw == 0: + continue + cache = page.slab_cache + if not cache: + continue + addr = cache.value_() + caches[addr] = cache + # clear the lowest bit to get the true obj_cgroups + objcg_vec = Object(prog, 'struct obj_cgroup **', + value=objcg_vec_raw & ~1) + + if addr not in stats: + stats[addr] = 0 + + for i in range(oo_objects(cache)): + if objcg_vec[i].value_() in obj_cgroups: + stats[addr] += 1 + + for addr in caches: + if stats[addr] > 0: + cache_show(caches[addr], cfg, stats[addr]) + + else: + for s in list_for_each_entry('struct kmem_cache', + memcg.kmem_caches.address_of_(), + 'memcg_params.kmem_caches_node'): + cache_show(s, cfg, None) + + +main()
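+ +# Usage sketch (the cgroup path is an example; the script runs under drgn, +# per the shebang above): +# $ ./memcg_slabinfo.py /sys/fs/cgroup/system.slice +# which prints one /proc/slabinfo-style row per cache used by that cgroup.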
diff --git a/data/linux-small/tools/hv/vmbus_testing b/data/linux-small/tools/hv/vmbus_testing new file mode 100755 index 0000000..e721290 --- /dev/null +++ b/data/linux-small/tools/hv/vmbus_testing @@ -0,0 +1,376 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# +# Program to allow users to fuzz test Hyper-V drivers +# by interfacing with Hyper-V debugfs attributes. +# Current test methods available: +# 1. delay testing +# +# Current file/directory structure of hyper-V debugfs: +# /sys/kernel/debug/hyperv/<UUID> +# /sys/kernel/debug/hyperv/<UUID>/<fuzz test state file> +# /sys/kernel/debug/hyperv/<UUID>/<fuzz test delay files> +# +# author: Branden Bonaby + +import os +import cmd
import argparse +import glob +from argparse import RawDescriptionHelpFormatter +from argparse import RawTextHelpFormatter +from enum import Enum + +# Do not change unless you change the debugfs attributes +# in /drivers/hv/debugfs.c. All fuzz testing +# attributes will start with "fuzz_test". + +# debugfs path for hyperv must exist before proceeding +debugfs_hyperv_path = "/sys/kernel/debug/hyperv" +if not os.path.isdir(debugfs_hyperv_path): + print("{} doesn't exist/check permissions".format(debugfs_hyperv_path)) + exit(-1) + +class dev_state(Enum): + off = 0 + on = 1 + +# File names that correspond to the files created in +# /drivers/hv/debugfs.c +class f_names(Enum): + state_f = "fuzz_test_state" + buff_f = "fuzz_test_buffer_interrupt_delay" + mess_f = "fuzz_test_message_delay" + +# Both single_actions and all_actions are used +# for error checking and to allow for some subparser +# names to be abbreviated. Do not abbreviate the +# test method names, as it will become less intuitive +# as to what the user can do. If you do decide to +# abbreviate the test method name, make sure the main +# function reflects this change. + +all_actions = [ + "disable_all", + "D", + "enable_all", + "view_all", + "V" +] + +single_actions = [ + "disable_single", + "d", + "enable_single", + "view_single", + "v" +] + +def main(): + + file_map = recursive_file_lookup(debugfs_hyperv_path, dict()) + args = parse_args() + if (not args.action): + print ("Error, no options selected...exiting") + exit(-1) + arg_set = { k for (k,v) in vars(args).items() if v and k != "action" } + arg_set.add(args.action) + path = args.path if "path" in arg_set else None + if (path and path[-1] == "/"): + path = path[:-1] + validate_args_path(path, arg_set, file_map) + if (path and "enable_single" in arg_set): + state_path = locate_state(path, file_map) + set_test_state(state_path, dev_state.on.value, args.quiet) + + # Use subparsers as the key for different actions + if ("delay" in arg_set): + validate_delay_values(args.delay_time) + if (args.enable_all): + set_delay_all_devices(file_map, args.delay_time, + args.quiet) + else: + set_delay_values(path, file_map, args.delay_time, + args.quiet) + elif ("disable_all" in arg_set or "D" in arg_set): + disable_all_testing(file_map) + elif ("disable_single" in arg_set or "d" in arg_set): + disable_testing_single_device(path, file_map) + elif ("view_all" in arg_set or "V" in arg_set): + get_all_devices_test_status(file_map) + elif ("view_single" in arg_set or "v" in arg_set): + get_device_test_values(path, file_map) + +# Get the state location +def locate_state(device, file_map): + return file_map[device][f_names.state_f.value] + +# Validate delay values to make sure they are acceptable to +# enable delays on a device +def validate_delay_values(delay): + + if (delay[0] == -1 and delay[1] == -1): + print("\nError, At least 1 value must be greater than 0") + exit(-1) + for i in delay: + if (i < -1 or i == 0 or i > 1000): + print("\nError, Values must be equal to -1 " + "or be > 0 and <= 1000") + exit(-1)
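+ +# For example, a delay pair of (-1, 500) keeps the current buffer-interrupt +# delay and sets the message delay to 500 microseconds, while (0, 0) is +# rejected: each value must be -1 (keep previous) or in the range 1..1000.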
" + "Use (-h) to check usage.") + exit(-1) + elif (path and any(item in arg_set for item in all_actions)): + print("Error, path (-p) NOT REQUIRED for the specified option. " + "Use (-h) to check usage." ) + exit(-1) + elif (path not in file_map and any(item in arg_set + for item in single_actions)): + print("Error, path '{}' not a valid vmbus device".format(path)) + exit(-1) + +# display Testing status of single device +def get_device_test_values(path, file_map): + + for name in file_map[path]: + file_location = file_map[path][name] + print( name + " = " + str(read_test_files(file_location))) + +# Create a map of the vmbus devices and their associated files +# [key=device, value = [key = filename, value = file path]] +def recursive_file_lookup(path, file_map): + + for f_path in glob.iglob(path + '**/*'): + if (os.path.isfile(f_path)): + if (f_path.rsplit("/",2)[0] == debugfs_hyperv_path): + directory = f_path.rsplit("/",1)[0] + else: + directory = f_path.rsplit("/",2)[0] + f_name = f_path.split("/")[-1] + if (file_map.get(directory)): + file_map[directory].update({f_name:f_path}) + else: + file_map[directory] = {f_name:f_path} + elif (os.path.isdir(f_path)): + recursive_file_lookup(f_path,file_map) + return file_map + +# display Testing state of devices +def get_all_devices_test_status(file_map): + + for device in file_map: + if (get_test_state(locate_state(device, file_map)) is 1): + print("Testing = ON for: {}" + .format(device.split("/")[5])) + else: + print("Testing = OFF for: {}" + .format(device.split("/")[5])) + +# read the vmbus device files, path must be absolute path before calling +def read_test_files(path): + try: + with open(path,"r") as f: + file_value = f.readline().strip() + return int(file_value) + + except IOError as e: + errno, strerror = e.args + print("I/O error({0}): {1} on file {2}" + .format(errno, strerror, path)) + exit(-1) + except ValueError: + print ("Element to int conversion error in: \n{}".format(path)) + exit(-1) + +# writing to vmbus device files, path must be absolute path before calling +def write_test_files(path, value): + + try: + with open(path,"w") as f: + f.write("{}".format(value)) + except IOError as e: + errno, strerror = e.args + print("I/O error({0}): {1} on file {2}" + .format(errno, strerror, path)) + exit(-1) + +# set testing state of device +def set_test_state(state_path, state_value, quiet): + + write_test_files(state_path, state_value) + if (get_test_state(state_path) is 1): + if (not quiet): + print("Testing = ON for device: {}" + .format(state_path.split("/")[5])) + else: + if (not quiet): + print("Testing = OFF for device: {}" + .format(state_path.split("/")[5])) + +# get testing state of device +def get_test_state(state_path): + #state == 1 - test = ON + #state == 0 - test = OFF + return read_test_files(state_path) + +# write 1 - 1000 microseconds, into a single device using the +# fuzz_test_buffer_interrupt_delay and fuzz_test_message_delay +# debugfs attributes +def set_delay_values(device, file_map, delay_length, quiet): + + try: + interrupt = file_map[device][f_names.buff_f.value] + message = file_map[device][f_names.mess_f.value] + + # delay[0]- buffer interrupt delay, delay[1]- message delay + if (delay_length[0] >= 0 and delay_length[0] <= 1000): + write_test_files(interrupt, delay_length[0]) + if (delay_length[1] >= 0 and delay_length[1] <= 1000): + write_test_files(message, delay_length[1]) + if (not quiet): + print("Buffer delay testing = {} for: {}" + .format(read_test_files(interrupt), + interrupt.split("/")[5])) + 
print("Message delay testing = {} for: {}" + .format(read_test_files(message), + message.split("/")[5])) + except IOError as e: + errno, strerror = e.args + print("I/O error({0}): {1} on files {2}{3}" + .format(errno, strerror, interrupt, message)) + exit(-1) + +# enabling delay testing on all devices +def set_delay_all_devices(file_map, delay, quiet): + + for device in (file_map): + set_test_state(locate_state(device, file_map), + dev_state.on.value, + quiet) + set_delay_values(device, file_map, delay, quiet) + +# disable all testing on a SINGLE device. +def disable_testing_single_device(device, file_map): + + for name in file_map[device]: + file_location = file_map[device][name] + write_test_files(file_location, dev_state.off.value) + print("ALL testing now OFF for {}".format(device.split("/")[-1])) + +# disable all testing on ALL devices +def disable_all_testing(file_map): + + for device in file_map: + disable_testing_single_device(device, file_map) + +def parse_args(): + parser = argparse.ArgumentParser(prog = "vmbus_testing",usage ="\n" + "%(prog)s [delay] [-h] [-e|-E] -t [-p]\n" + "%(prog)s [view_all | V] [-h]\n" + "%(prog)s [disable_all | D] [-h]\n" + "%(prog)s [disable_single | d] [-h|-p]\n" + "%(prog)s [view_single | v] [-h|-p]\n" + "%(prog)s --version\n", + description = "\nUse lsvmbus to get vmbus device type " + "information.\n" "\nThe debugfs root path is " + "/sys/kernel/debug/hyperv", + formatter_class = RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest = "action") + parser.add_argument("--version", action = "version", + version = '%(prog)s 0.1.0') + parser.add_argument("-q","--quiet", action = "store_true", + help = "silence none important test messages." + " This will only work when enabling testing" + " on a device.") + # Use the path parser to hold the --path attribute so it can + # be shared between subparsers. Also do the same for the state + # parser, as all testing methods will use --enable_all and + # enable_single. + path_parser = argparse.ArgumentParser(add_help=False) + path_parser.add_argument("-p","--path", metavar = "", + help = "Debugfs path to a vmbus device. The path " + "must be the absolute path to the device.") + state_parser = argparse.ArgumentParser(add_help=False) + state_group = state_parser.add_mutually_exclusive_group(required = True) + state_group.add_argument("-E", "--enable_all", action = "store_const", + const = "enable_all", + help = "Enable the specified test type " + "on ALL vmbus devices.") + state_group.add_argument("-e", "--enable_single", + action = "store_const", + const = "enable_single", + help = "Enable the specified test type on a " + "SINGLE vmbus device.") + parser_delay = subparsers.add_parser("delay", + parents = [state_parser, path_parser], + help = "Delay the ring buffer interrupt or the " + "ring buffer message reads in microseconds.", + prog = "vmbus_testing", + usage = "%(prog)s [-h]\n" + "%(prog)s -E -t [value] [value]\n" + "%(prog)s -e -t [value] [value] -p", + description = "Delay the ring buffer interrupt for " + "vmbus devices, or delay the ring buffer message " + "reads for vmbus devices (both in microseconds). This " + "is only on the host to guest channel.") + parser_delay.add_argument("-t", "--delay_time", metavar = "", nargs = 2, + type = check_range, default =[0,0], required = (True), + help = "Set [buffer] & [message] delay time. 
" + "Value constraints: -1 == value " + "or 0 < value <= 1000.\n" + "Use -1 to keep the previous value for that delay " + "type, or a value > 0 <= 1000 to change the delay " + "time.") + parser_dis_all = subparsers.add_parser("disable_all", + aliases = ['D'], prog = "vmbus_testing", + usage = "%(prog)s [disable_all | D] -h\n" + "%(prog)s [disable_all | D]\n", + help = "Disable ALL testing on ALL vmbus devices.", + description = "Disable ALL testing on ALL vmbus " + "devices.") + parser_dis_single = subparsers.add_parser("disable_single", + aliases = ['d'], + parents = [path_parser], prog = "vmbus_testing", + usage = "%(prog)s [disable_single | d] -h\n" + "%(prog)s [disable_single | d] -p\n", + help = "Disable ALL testing on a SINGLE vmbus device.", + description = "Disable ALL testing on a SINGLE vmbus " + "device.") + parser_view_all = subparsers.add_parser("view_all", aliases = ['V'], + help = "View the test state for ALL vmbus devices.", + prog = "vmbus_testing", + usage = "%(prog)s [view_all | V] -h\n" + "%(prog)s [view_all | V]\n", + description = "This shows the test state for ALL the " + "vmbus devices.") + parser_view_single = subparsers.add_parser("view_single", + aliases = ['v'],parents = [path_parser], + help = "View the test values for a SINGLE vmbus " + "device.", + description = "This shows the test values for a SINGLE " + "vmbus device.", prog = "vmbus_testing", + usage = "%(prog)s [view_single | v] -h\n" + "%(prog)s [view_single | v] -p") + + return parser.parse_args() + +# value checking for range checking input in parser +def check_range(arg1): + + try: + val = int(arg1) + except ValueError as err: + raise argparse.ArgumentTypeError(str(err)) + if val < -1 or val > 1000: + message = ("\n\nvalue must be -1 or 0 < value <= 1000. " + "Value program received: {}\n").format(val) + raise argparse.ArgumentTypeError(message) + return val + +if __name__ == "__main__": + main() diff --git a/data/linux-small/tools/kvm/kvm_stat/kvm_stat b/data/linux-small/tools/kvm/kvm_stat/kvm_stat new file mode 100755 index 0000000..b0bf56c --- /dev/null +++ b/data/linux-small/tools/kvm/kvm_stat/kvm_stat @@ -0,0 +1,1819 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-only +# +# top-like utility for displaying kvm statistics +# +# Copyright 2006-2008 Qumranet Technologies +# Copyright 2008-2011 Red Hat, Inc. +# +# Authors: +# Avi Kivity +# +"""The kvm_stat module outputs statistics about running KVM VMs + +Three different ways of output formatting are available: +- as a top-like text ui +- in a key -> value format +- in an all keys, all values format + +The data is sampled from the KVM's debugfs entries and its perf events. 
+""" +from __future__ import print_function + +import curses +import sys +import locale +import os +import time +import argparse +import ctypes +import fcntl +import resource +import struct +import re +import subprocess +import signal +from collections import defaultdict, namedtuple +from functools import reduce +from datetime import datetime + +VMX_EXIT_REASONS = { + 'EXCEPTION_NMI': 0, + 'EXTERNAL_INTERRUPT': 1, + 'TRIPLE_FAULT': 2, + 'PENDING_INTERRUPT': 7, + 'NMI_WINDOW': 8, + 'TASK_SWITCH': 9, + 'CPUID': 10, + 'HLT': 12, + 'INVLPG': 14, + 'RDPMC': 15, + 'RDTSC': 16, + 'VMCALL': 18, + 'VMCLEAR': 19, + 'VMLAUNCH': 20, + 'VMPTRLD': 21, + 'VMPTRST': 22, + 'VMREAD': 23, + 'VMRESUME': 24, + 'VMWRITE': 25, + 'VMOFF': 26, + 'VMON': 27, + 'CR_ACCESS': 28, + 'DR_ACCESS': 29, + 'IO_INSTRUCTION': 30, + 'MSR_READ': 31, + 'MSR_WRITE': 32, + 'INVALID_STATE': 33, + 'MWAIT_INSTRUCTION': 36, + 'MONITOR_INSTRUCTION': 39, + 'PAUSE_INSTRUCTION': 40, + 'MCE_DURING_VMENTRY': 41, + 'TPR_BELOW_THRESHOLD': 43, + 'APIC_ACCESS': 44, + 'EPT_VIOLATION': 48, + 'EPT_MISCONFIG': 49, + 'WBINVD': 54, + 'XSETBV': 55, + 'APIC_WRITE': 56, + 'INVPCID': 58, +} + +SVM_EXIT_REASONS = { + 'READ_CR0': 0x000, + 'READ_CR3': 0x003, + 'READ_CR4': 0x004, + 'READ_CR8': 0x008, + 'WRITE_CR0': 0x010, + 'WRITE_CR3': 0x013, + 'WRITE_CR4': 0x014, + 'WRITE_CR8': 0x018, + 'READ_DR0': 0x020, + 'READ_DR1': 0x021, + 'READ_DR2': 0x022, + 'READ_DR3': 0x023, + 'READ_DR4': 0x024, + 'READ_DR5': 0x025, + 'READ_DR6': 0x026, + 'READ_DR7': 0x027, + 'WRITE_DR0': 0x030, + 'WRITE_DR1': 0x031, + 'WRITE_DR2': 0x032, + 'WRITE_DR3': 0x033, + 'WRITE_DR4': 0x034, + 'WRITE_DR5': 0x035, + 'WRITE_DR6': 0x036, + 'WRITE_DR7': 0x037, + 'EXCP_BASE': 0x040, + 'INTR': 0x060, + 'NMI': 0x061, + 'SMI': 0x062, + 'INIT': 0x063, + 'VINTR': 0x064, + 'CR0_SEL_WRITE': 0x065, + 'IDTR_READ': 0x066, + 'GDTR_READ': 0x067, + 'LDTR_READ': 0x068, + 'TR_READ': 0x069, + 'IDTR_WRITE': 0x06a, + 'GDTR_WRITE': 0x06b, + 'LDTR_WRITE': 0x06c, + 'TR_WRITE': 0x06d, + 'RDTSC': 0x06e, + 'RDPMC': 0x06f, + 'PUSHF': 0x070, + 'POPF': 0x071, + 'CPUID': 0x072, + 'RSM': 0x073, + 'IRET': 0x074, + 'SWINT': 0x075, + 'INVD': 0x076, + 'PAUSE': 0x077, + 'HLT': 0x078, + 'INVLPG': 0x079, + 'INVLPGA': 0x07a, + 'IOIO': 0x07b, + 'MSR': 0x07c, + 'TASK_SWITCH': 0x07d, + 'FERR_FREEZE': 0x07e, + 'SHUTDOWN': 0x07f, + 'VMRUN': 0x080, + 'VMMCALL': 0x081, + 'VMLOAD': 0x082, + 'VMSAVE': 0x083, + 'STGI': 0x084, + 'CLGI': 0x085, + 'SKINIT': 0x086, + 'RDTSCP': 0x087, + 'ICEBP': 0x088, + 'WBINVD': 0x089, + 'MONITOR': 0x08a, + 'MWAIT': 0x08b, + 'MWAIT_COND': 0x08c, + 'XSETBV': 0x08d, + 'NPF': 0x400, +} + +# EC definition of HSR (from arch/arm64/include/asm/kvm_arm.h) +AARCH64_EXIT_REASONS = { + 'UNKNOWN': 0x00, + 'WFI': 0x01, + 'CP15_32': 0x03, + 'CP15_64': 0x04, + 'CP14_MR': 0x05, + 'CP14_LS': 0x06, + 'FP_ASIMD': 0x07, + 'CP10_ID': 0x08, + 'CP14_64': 0x0C, + 'ILL_ISS': 0x0E, + 'SVC32': 0x11, + 'HVC32': 0x12, + 'SMC32': 0x13, + 'SVC64': 0x15, + 'HVC64': 0x16, + 'SMC64': 0x17, + 'SYS64': 0x18, + 'IABT': 0x20, + 'IABT_HYP': 0x21, + 'PC_ALIGN': 0x22, + 'DABT': 0x24, + 'DABT_HYP': 0x25, + 'SP_ALIGN': 0x26, + 'FP_EXC32': 0x28, + 'FP_EXC64': 0x2C, + 'SERROR': 0x2F, + 'BREAKPT': 0x30, + 'BREAKPT_HYP': 0x31, + 'SOFTSTP': 0x32, + 'SOFTSTP_HYP': 0x33, + 'WATCHPT': 0x34, + 'WATCHPT_HYP': 0x35, + 'BKPT32': 0x38, + 'VECTOR32': 0x3A, + 'BRK64': 0x3C, +} + +# From include/uapi/linux/kvm.h, KVM_EXIT_xxx +USERSPACE_EXIT_REASONS = { + 'UNKNOWN': 0, + 'EXCEPTION': 1, + 'IO': 2, + 'HYPERCALL': 3, + 'DEBUG': 4, + 'HLT': 5, + 'MMIO': 6, + 
'IRQ_WINDOW_OPEN': 7, + 'SHUTDOWN': 8, + 'FAIL_ENTRY': 9, + 'INTR': 10, + 'SET_TPR': 11, + 'TPR_ACCESS': 12, + 'S390_SIEIC': 13, + 'S390_RESET': 14, + 'DCR': 15, + 'NMI': 16, + 'INTERNAL_ERROR': 17, + 'OSI': 18, + 'PAPR_HCALL': 19, + 'S390_UCONTROL': 20, + 'WATCHDOG': 21, + 'S390_TSCH': 22, + 'EPR': 23, + 'SYSTEM_EVENT': 24, +} + +IOCTL_NUMBERS = { + 'SET_FILTER': 0x40082406, + 'ENABLE': 0x00002400, + 'DISABLE': 0x00002401, + 'RESET': 0x00002403, +} + +signal_received = False + +ENCODING = locale.getpreferredencoding(False) +TRACE_FILTER = re.compile(r'^[^\(]*$') + + +class Arch(object): + """Encapsulates global architecture specific data. + + Contains the performance event open syscall and ioctl numbers, as + well as the VM exit reasons for the architecture it runs on. + + """ + @staticmethod + def get_arch(): + machine = os.uname()[4] + + if machine.startswith('ppc'): + return ArchPPC() + elif machine.startswith('aarch64'): + return ArchA64() + elif machine.startswith('s390'): + return ArchS390() + else: + # X86_64 + for line in open('/proc/cpuinfo'): + if not line.startswith('flags'): + continue + + flags = line.split() + if 'vmx' in flags: + return ArchX86(VMX_EXIT_REASONS) + if 'svm' in flags: + return ArchX86(SVM_EXIT_REASONS) + return + + def tracepoint_is_child(self, field): + if (TRACE_FILTER.match(field)): + return None + return field.split('(', 1)[0] + + +class ArchX86(Arch): + def __init__(self, exit_reasons): + self.sc_perf_evt_open = 298 + self.ioctl_numbers = IOCTL_NUMBERS + self.exit_reason_field = 'exit_reason' + self.exit_reasons = exit_reasons + + def debugfs_is_child(self, field): + """ Returns name of parent if 'field' is a child, None otherwise """ + return None + + +class ArchPPC(Arch): + def __init__(self): + self.sc_perf_evt_open = 319 + self.ioctl_numbers = IOCTL_NUMBERS + self.ioctl_numbers['ENABLE'] = 0x20002400 + self.ioctl_numbers['DISABLE'] = 0x20002401 + self.ioctl_numbers['RESET'] = 0x20002403 + + # PPC comes in 32 and 64 bit and some generated ioctl + # numbers depend on the wordsize. + char_ptr_size = ctypes.sizeof(ctypes.c_char_p) + self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16 + self.exit_reason_field = 'exit_nr' + self.exit_reasons = {} + + def debugfs_is_child(self, field): + """ Returns name of parent if 'field' is a child, None otherwise """ + return None + + +class ArchA64(Arch): + def __init__(self): + self.sc_perf_evt_open = 241 + self.ioctl_numbers = IOCTL_NUMBERS + self.exit_reason_field = 'esr_ec' + self.exit_reasons = AARCH64_EXIT_REASONS + + def debugfs_is_child(self, field): + """ Returns name of parent if 'field' is a child, None otherwise """ + return None + + +class ArchS390(Arch): + def __init__(self): + self.sc_perf_evt_open = 331 + self.ioctl_numbers = IOCTL_NUMBERS + self.exit_reason_field = None + self.exit_reasons = None + + def debugfs_is_child(self, field): + """ Returns name of parent if 'field' is a child, None otherwise """ + if field.startswith('instruction_'): + return 'exit_instruction' + + +ARCH = Arch.get_arch() + + +class perf_event_attr(ctypes.Structure): + """Struct that holds the necessary data to set up a trace event. + + For an extensive explanation see perf_event_open(2) and + include/uapi/linux/perf_event.h, struct perf_event_attr + + All fields that are not initialized in the constructor are 0. 
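+
+    Note: the kernel declares a series of individual bit flags (disabled,
+    inherit, ...) after 'read_format'; this mirror collapses them into the
+    single 64-bit 'flags' member.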
+ + """ + _fields_ = [('type', ctypes.c_uint32), + ('size', ctypes.c_uint32), + ('config', ctypes.c_uint64), + ('sample_freq', ctypes.c_uint64), + ('sample_type', ctypes.c_uint64), + ('read_format', ctypes.c_uint64), + ('flags', ctypes.c_uint64), + ('wakeup_events', ctypes.c_uint32), + ('bp_type', ctypes.c_uint32), + ('bp_addr', ctypes.c_uint64), + ('bp_len', ctypes.c_uint64), + ] + + def __init__(self): + super(self.__class__, self).__init__() + self.type = PERF_TYPE_TRACEPOINT + self.size = ctypes.sizeof(self) + self.read_format = PERF_FORMAT_GROUP + + +PERF_TYPE_TRACEPOINT = 2 +PERF_FORMAT_GROUP = 1 << 3 + + +class Group(object): + """Represents a perf event group.""" + + def __init__(self): + self.events = [] + + def add_event(self, event): + self.events.append(event) + + def read(self): + """Returns a dict with 'event name: value' for all events in the + group. + + Values are read by reading from the file descriptor of the + event that is the group leader. See perf_event_open(2) for + details. + + Read format for the used event configuration is: + struct read_format { + u64 nr; /* The number of events */ + struct { + u64 value; /* The value of the event */ + } values[nr]; + }; + + """ + length = 8 * (1 + len(self.events)) + read_format = 'xxxxxxxx' + 'Q' * len(self.events) + return dict(zip([event.name for event in self.events], + struct.unpack(read_format, + os.read(self.events[0].fd, length)))) + + +class Event(object): + """Represents a performance event and manages its life cycle.""" + def __init__(self, name, group, trace_cpu, trace_pid, trace_point, + trace_filter, trace_set='kvm'): + self.libc = ctypes.CDLL('libc.so.6', use_errno=True) + self.syscall = self.libc.syscall + self.name = name + self.fd = None + self._setup_event(group, trace_cpu, trace_pid, trace_point, + trace_filter, trace_set) + + def __del__(self): + """Closes the event's file descriptor. + + As no python file object was created for the file descriptor, + python will not reference count the descriptor and will not + close it itself automatically, so we do it. + + """ + if self.fd: + os.close(self.fd) + + def _perf_event_open(self, attr, pid, cpu, group_fd, flags): + """Wrapper for the sys_perf_evt_open() syscall. + + Used to set up performance events, returns a file descriptor or -1 + on error. + + Attributes are: + - syscall number + - struct perf_event_attr * + - pid or -1 to monitor all pids + - cpu number or -1 to monitor all cpus + - The file descriptor of the group leader or -1 to create a group. + - flags + + """ + return self.syscall(ARCH.sc_perf_evt_open, ctypes.pointer(attr), + ctypes.c_int(pid), ctypes.c_int(cpu), + ctypes.c_int(group_fd), ctypes.c_long(flags)) + + def _setup_event_attribute(self, trace_set, trace_point): + """Returns an initialized ctype perf_event_attr struct.""" + + id_path = os.path.join(PATH_DEBUGFS_TRACING, 'events', trace_set, + trace_point, 'id') + + event_attr = perf_event_attr() + event_attr.config = int(open(id_path).read()) + return event_attr + + def _setup_event(self, group, trace_cpu, trace_pid, trace_point, + trace_filter, trace_set): + """Sets up the perf event in Linux. + + Issues the syscall to register the event in the kernel and + then sets the optional filter. + + """ + + event_attr = self._setup_event_attribute(trace_set, trace_point) + + # First event will be group leader. + group_leader = -1 + + # All others have to pass the leader's descriptor instead. 
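+        # (perf schedules grouped events onto the PMU together; with
+        # PERF_FORMAT_GROUP a single read of the leader then yields all
+        # members' counts - see Group.read() above.)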
+ if group.events: + group_leader = group.events[0].fd + + fd = self._perf_event_open(event_attr, trace_pid, + trace_cpu, group_leader, 0) + if fd == -1: + err = ctypes.get_errno() + raise OSError(err, os.strerror(err), + 'while calling sys_perf_event_open().') + + if trace_filter: + fcntl.ioctl(fd, ARCH.ioctl_numbers['SET_FILTER'], + trace_filter) + + self.fd = fd + + def enable(self): + """Enables the trace event in the kernel. + + Enabling the group leader makes reading counters from it and the + events under it possible. + + """ + fcntl.ioctl(self.fd, ARCH.ioctl_numbers['ENABLE'], 0) + + def disable(self): + """Disables the trace event in the kernel. + + Disabling the group leader makes reading all counters under it + impossible. + + """ + fcntl.ioctl(self.fd, ARCH.ioctl_numbers['DISABLE'], 0) + + def reset(self): + """Resets the count of the trace event in the kernel.""" + fcntl.ioctl(self.fd, ARCH.ioctl_numbers['RESET'], 0) + + +class Provider(object): + """Encapsulates functionalities used by all providers.""" + def __init__(self, pid): + self.child_events = False + self.pid = pid + + @staticmethod + def is_field_wanted(fields_filter, field): + """Indicate whether field is valid according to fields_filter.""" + if not fields_filter: + return True + return re.match(fields_filter, field) is not None + + @staticmethod + def walkdir(path): + """Returns os.walk() data for specified directory. + + As it is only a wrapper it returns the same 3-tuple of (dirpath, + dirnames, filenames). + """ + return next(os.walk(path)) + + +class TracepointProvider(Provider): + """Data provider for the stats class. + + Manages the events/groups from which it acquires its data. + + """ + def __init__(self, pid, fields_filter): + self.group_leaders = [] + self.filters = self._get_filters() + self.update_fields(fields_filter) + super(TracepointProvider, self).__init__(pid) + + @staticmethod + def _get_filters(): + """Returns a dict of trace events, their filter ids and + the values that can be filtered. + + Trace events can be filtered for special values by setting a + filter string via an ioctl. The string normally has the format + identifier==value. For each filter a new event will be created, to + be able to distinguish the events. + + """ + filters = {} + filters['kvm_userspace_exit'] = ('reason', USERSPACE_EXIT_REASONS) + if ARCH.exit_reason_field and ARCH.exit_reasons: + filters['kvm_exit'] = (ARCH.exit_reason_field, ARCH.exit_reasons) + return filters + + def _get_available_fields(self): + """Returns a list of available events of format 'event name(filter + name)'. + + All available events have directories under + /sys/kernel/debug/tracing/events/ which export information + about the specific event. Therefore, listing the dirs gives us + a list of all available events. + + Some events like the vm exit reasons can be filtered for + specific values. 
To take account for that, the routine below + creates special fields with the following format: + event name(filter name) + + """ + path = os.path.join(PATH_DEBUGFS_TRACING, 'events', 'kvm') + fields = self.walkdir(path)[1] + extra = [] + for field in fields: + if field in self.filters: + filter_name_, filter_dicts = self.filters[field] + for name in filter_dicts: + extra.append(field + '(' + name + ')') + fields += extra + return fields + + def update_fields(self, fields_filter): + """Refresh fields, applying fields_filter""" + self.fields = [field for field in self._get_available_fields() + if self.is_field_wanted(fields_filter, field)] + # add parents for child fields - otherwise we won't see any output! + for field in self._fields: + parent = ARCH.tracepoint_is_child(field) + if (parent and parent not in self._fields): + self.fields.append(parent) + + @staticmethod + def _get_online_cpus(): + """Returns a list of cpu id integers.""" + def parse_int_list(list_string): + """Returns an int list from a string of comma separated integers and + integer ranges.""" + integers = [] + members = list_string.split(',') + + for member in members: + if '-' not in member: + integers.append(int(member)) + else: + int_range = member.split('-') + integers.extend(range(int(int_range[0]), + int(int_range[1]) + 1)) + + return integers + + with open('/sys/devices/system/cpu/online') as cpu_list: + cpu_string = cpu_list.readline() + return parse_int_list(cpu_string) + + def _setup_traces(self): + """Creates all event and group objects needed to be able to retrieve + data.""" + fields = self._get_available_fields() + if self._pid > 0: + # Fetch list of all threads of the monitored pid, as qemu + # starts a thread for each vcpu. + path = os.path.join('/proc', str(self._pid), 'task') + groupids = self.walkdir(path)[1] + else: + groupids = self._get_online_cpus() + + # The constant is needed as a buffer for python libs, std + # streams and other files that the script opens. + newlim = len(groupids) * len(fields) + 50 + try: + softlim_, hardlim = resource.getrlimit(resource.RLIMIT_NOFILE) + + if hardlim < newlim: + # Now we need CAP_SYS_RESOURCE, to increase the hard limit. + resource.setrlimit(resource.RLIMIT_NOFILE, (newlim, newlim)) + else: + # Raising the soft limit is sufficient. + resource.setrlimit(resource.RLIMIT_NOFILE, (newlim, hardlim)) + + except ValueError: + sys.exit("NOFILE rlimit could not be raised to {0}".format(newlim)) + + for groupid in groupids: + group = Group() + for name in fields: + tracepoint = name + tracefilter = None + match = re.match(r'(.*)\((.*)\)', name) + if match: + tracepoint, sub = match.groups() + tracefilter = ('%s==%d\0' % + (self.filters[tracepoint][0], + self.filters[tracepoint][1][sub])) + + # From perf_event_open(2): + # pid > 0 and cpu == -1 + # This measures the specified process/thread on any CPU. + # + # pid == -1 and cpu >= 0 + # This measures all processes/threads on the specified CPU. 
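+            #
+            # Hence a groupid is used either as a task id (when a pid is
+            # being monitored) or as a cpu number (system-wide monitoring).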
+ trace_cpu = groupid if self._pid == 0 else -1 + trace_pid = int(groupid) if self._pid != 0 else -1 + + group.add_event(Event(name=name, + group=group, + trace_cpu=trace_cpu, + trace_pid=trace_pid, + trace_point=tracepoint, + trace_filter=tracefilter)) + + self.group_leaders.append(group) + + @property + def fields(self): + return self._fields + + @fields.setter + def fields(self, fields): + """Enables/disables the (un)wanted events""" + self._fields = fields + for group in self.group_leaders: + for index, event in enumerate(group.events): + if event.name in fields: + event.reset() + event.enable() + else: + # Do not disable the group leader. + # It would disable all of its events. + if index != 0: + event.disable() + + @property + def pid(self): + return self._pid + + @pid.setter + def pid(self, pid): + """Changes the monitored pid by setting new traces.""" + self._pid = pid + # The garbage collector will get rid of all Event/Group + # objects and open files after removing the references. + self.group_leaders = [] + self._setup_traces() + self.fields = self._fields + + def read(self, by_guest=0): + """Returns 'event name: current value' for all enabled events.""" + ret = defaultdict(int) + for group in self.group_leaders: + for name, val in group.read().items(): + if name not in self._fields: + continue + parent = ARCH.tracepoint_is_child(name) + if parent: + name += ' ' + parent + ret[name] += val + return ret + + def reset(self): + """Reset all field counters""" + for group in self.group_leaders: + for event in group.events: + event.reset() + + +class DebugfsProvider(Provider): + """Provides data from the files that KVM creates in the kvm debugfs + folder.""" + def __init__(self, pid, fields_filter, include_past): + self.update_fields(fields_filter) + self._baseline = {} + self.do_read = True + self.paths = [] + super(DebugfsProvider, self).__init__(pid) + if include_past: + self._restore() + + def _get_available_fields(self): + """"Returns a list of available fields. + + The fields are all available KVM debugfs files + + """ + exempt_list = ['halt_poll_fail_ns', 'halt_poll_success_ns'] + fields = [field for field in self.walkdir(PATH_DEBUGFS_KVM)[2] + if field not in exempt_list] + + return fields + + def update_fields(self, fields_filter): + """Refresh fields, applying fields_filter""" + self._fields = [field for field in self._get_available_fields() + if self.is_field_wanted(fields_filter, field)] + # add parents for child fields - otherwise we won't see any output! + for field in self._fields: + parent = ARCH.debugfs_is_child(field) + if (parent and parent not in self._fields): + self.fields.append(parent) + + @property + def fields(self): + return self._fields + + @fields.setter + def fields(self, fields): + self._fields = fields + self.reset() + + @property + def pid(self): + return self._pid + + @pid.setter + def pid(self, pid): + self._pid = pid + if pid != 0: + vms = self.walkdir(PATH_DEBUGFS_KVM)[1] + if len(vms) == 0: + self.do_read = False + + self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms)) + + else: + self.paths = [] + self.do_read = True + + def _verify_paths(self): + """Remove invalid paths""" + for path in self.paths: + if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)): + self.paths.remove(path) + continue + + def read(self, reset=0, by_guest=0): + """Returns a dict with format:'file name / field -> current value'. 
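+
+        Values are reported relative to the baseline captured at reset
+        time (see _baseline), so right after a reset every field reads 0.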
+ + Parameter 'reset': + 0 plain read + 1 reset field counts to 0 + 2 restore the original field counts + + """ + results = {} + + # If no debugfs filtering support is available, then don't read. + if not self.do_read: + return results + self._verify_paths() + + paths = self.paths + if self._pid == 0: + paths = [] + for entry in os.walk(PATH_DEBUGFS_KVM): + for dir in entry[1]: + paths.append(dir) + for path in paths: + for field in self._fields: + value = self._read_field(field, path) + key = path + field + if reset == 1: + self._baseline[key] = value + if reset == 2: + self._baseline[key] = 0 + if self._baseline.get(key, -1) == -1: + self._baseline[key] = value + parent = ARCH.debugfs_is_child(field) + if parent: + field = field + ' ' + parent + else: + if by_guest: + field = key.split('-')[0] # set 'field' to 'pid' + increment = value - self._baseline.get(key, 0) + if field in results: + results[field] += increment + else: + results[field] = increment + + return results + + def _read_field(self, field, path): + """Returns the value of a single field from a specific VM.""" + try: + return int(open(os.path.join(PATH_DEBUGFS_KVM, + path, + field)) + .read()) + except IOError: + return 0 + + def reset(self): + """Reset field counters""" + self._baseline = {} + self.read(1) + + def _restore(self): + """Reset field counters""" + self._baseline = {} + self.read(2) + + +EventStat = namedtuple('EventStat', ['value', 'delta']) + + +class Stats(object): + """Manages the data providers and the data they provide. + + It is used to set filters on the provider's data and collect all + provider data. + + """ + def __init__(self, options): + self.providers = self._get_providers(options) + self._pid_filter = options.pid + self._fields_filter = options.fields + self.values = {} + self._child_events = False + + def _get_providers(self, options): + """Returns a list of data providers depending on the passed options.""" + providers = [] + + if options.debugfs: + providers.append(DebugfsProvider(options.pid, options.fields, + options.debugfs_include_past)) + if options.tracepoints or not providers: + providers.append(TracepointProvider(options.pid, options.fields)) + + return providers + + def _update_provider_filters(self): + """Propagates fields filters to providers.""" + # As we reset the counters when updating the fields we can + # also clear the cache of old values. + self.values = {} + for provider in self.providers: + provider.update_fields(self._fields_filter) + + def reset(self): + self.values = {} + for provider in self.providers: + provider.reset() + + @property + def fields_filter(self): + return self._fields_filter + + @fields_filter.setter + def fields_filter(self, fields_filter): + if fields_filter != self._fields_filter: + self._fields_filter = fields_filter + self._update_provider_filters() + + @property + def pid_filter(self): + return self._pid_filter + + @pid_filter.setter + def pid_filter(self, pid): + if pid != self._pid_filter: + self._pid_filter = pid + self.values = {} + for provider in self.providers: + provider.pid = self._pid_filter + + @property + def child_events(self): + return self._child_events + + @child_events.setter + def child_events(self, val): + self._child_events = val + for provider in self.providers: + provider.child_events = val + + def get(self, by_guest=0): + """Returns a dict with field -> (value, delta to last value) of all + provider data. 
+ Key formats: + * plain: 'key' is event name + * child-parent: 'key' is in format ' ' + * pid: 'key' is the pid of the guest, and the record contains the + aggregated event data + These formats are generated by the providers, and handled in class TUI. + """ + for provider in self.providers: + new = provider.read(by_guest=by_guest) + for key in new: + oldval = self.values.get(key, EventStat(0, 0)).value + newval = new.get(key, 0) + newdelta = newval - oldval + self.values[key] = EventStat(newval, newdelta) + return self.values + + def toggle_display_guests(self, to_pid): + """Toggle between collection of stats by individual event and by + guest pid + + Events reported by DebugfsProvider change when switching to/from + reading by guest values. Hence we have to remove the excess event + names from self.values. + + """ + if any(isinstance(ins, TracepointProvider) for ins in self.providers): + return 1 + if to_pid: + for provider in self.providers: + if isinstance(provider, DebugfsProvider): + for key in provider.fields: + if key in self.values.keys(): + del self.values[key] + else: + oldvals = self.values.copy() + for key in oldvals: + if key.isdigit(): + del self.values[key] + # Update oldval (see get()) + self.get(to_pid) + return 0 + + +DELAY_DEFAULT = 3.0 +MAX_GUEST_NAME_LEN = 48 +MAX_REGEX_LEN = 44 +SORT_DEFAULT = 0 +MIN_DELAY = 0.1 +MAX_DELAY = 25.5 + + +class Tui(object): + """Instruments curses to draw a nice text ui.""" + def __init__(self, stats, opts): + self.stats = stats + self.screen = None + self._delay_initial = 0.25 + self._delay_regular = opts.set_delay + self._sorting = SORT_DEFAULT + self._display_guests = 0 + + def __enter__(self): + """Initialises curses for later use. Based on curses.wrapper + implementation from the Python standard library.""" + self.screen = curses.initscr() + curses.noecho() + curses.cbreak() + + # The try/catch works around a minor bit of + # over-conscientiousness in the curses module, the error + # return from C start_color() is ignorable. + try: + curses.start_color() + except curses.error: + pass + + # Hide cursor in extra statement as some monochrome terminals + # might support hiding but not colors. + try: + curses.curs_set(0) + except curses.error: + pass + + curses.use_default_colors() + return self + + def __exit__(self, *exception): + """Resets the terminal to its normal state. 
Based on curses.wrapper + implementation from the Python standard library.""" + if self.screen: + self.screen.keypad(0) + curses.echo() + curses.nocbreak() + curses.endwin() + + @staticmethod + def get_all_gnames(): + """Returns a list of (pid, gname) tuples of all running guests""" + res = [] + try: + child = subprocess.Popen(['ps', '-A', '--format', 'pid,args'], + stdout=subprocess.PIPE) + except: + raise Exception + for line in child.stdout: + line = line.decode(ENCODING).lstrip().split(' ', 1) + # perform a sanity check before calling the more expensive + # function to possibly extract the guest name + if ' -name ' in line[1]: + res.append((line[0], Tui.get_gname_from_pid(line[0]))) + child.stdout.close() + + return res + + def _print_all_gnames(self, row): + """Print a list of all running guests along with their pids.""" + self.screen.addstr(row, 2, '%8s %-60s' % + ('Pid', 'Guest Name (fuzzy list, might be ' + 'inaccurate!)'), + curses.A_UNDERLINE) + row += 1 + try: + for line in self.get_all_gnames(): + self.screen.addstr(row, 2, '%8s %-60s' % (line[0], line[1])) + row += 1 + if row >= self.screen.getmaxyx()[0]: + break + except Exception: + self.screen.addstr(row + 1, 2, 'Not available') + + @staticmethod + def get_pid_from_gname(gname): + """Fuzzy function to convert guest name to QEMU process pid. + + Returns a list of potential pids, can be empty if no match found. + Throws an exception on processing errors. + + """ + pids = [] + for line in Tui.get_all_gnames(): + if gname == line[1]: + pids.append(int(line[0])) + + return pids + + @staticmethod + def get_gname_from_pid(pid): + """Returns the guest name for a QEMU process pid. + + Extracts the guest name from the QEMU comma line by processing the + '-name' option. Will also handle names specified out of sequence. + + """ + name = '' + try: + line = open('/proc/{}/cmdline' + .format(pid), 'r').read().split('\0') + parms = line[line.index('-name') + 1].split(',') + while '' in parms: + # commas are escaped (i.e. ',,'), hence e.g. 'foo,bar' results + # in # ['foo', '', 'bar'], which we revert here + idx = parms.index('') + parms[idx - 1] += ',' + parms[idx + 1] + del parms[idx:idx+2] + # the '-name' switch allows for two ways to specify the guest name, + # where the plain name overrides the name specified via 'guest=' + for arg in parms: + if '=' not in arg: + name = arg + break + if arg[:6] == 'guest=': + name = arg[6:] + except (ValueError, IOError, IndexError): + pass + + return name + + def _update_pid(self, pid): + """Propagates pid selection to stats object.""" + self.screen.addstr(4, 1, 'Updating pid filter...') + self.screen.refresh() + self.stats.pid_filter = pid + + def _refresh_header(self, pid=None): + """Refreshes the header.""" + if pid is None: + pid = self.stats.pid_filter + self.screen.erase() + gname = self.get_gname_from_pid(pid) + self._gname = gname + if gname: + gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...' + if len(gname) > MAX_GUEST_NAME_LEN + else gname)) + if pid > 0: + self._headline = 'kvm statistics - pid {0} {1}'.format(pid, gname) + else: + self._headline = 'kvm statistics - summary' + self.screen.addstr(0, 0, self._headline, curses.A_BOLD) + if self.stats.fields_filter: + regex = self.stats.fields_filter + if len(regex) > MAX_REGEX_LEN: + regex = regex[:MAX_REGEX_LEN] + '...' 
+ self.screen.addstr(1, 17, 'regex filter: {0}'.format(regex)) + if self._display_guests: + col_name = 'Guest Name' + else: + col_name = 'Event' + self.screen.addstr(2, 1, '%-40s %10s%7s %8s' % + (col_name, 'Total', '%Total', 'CurAvg/s'), + curses.A_STANDOUT) + self.screen.addstr(4, 1, 'Collecting data...') + self.screen.refresh() + + def _refresh_body(self, sleeptime): + def insert_child(sorted_items, child, values, parent): + num = len(sorted_items) + for i in range(0, num): + # only add child if parent is present + if parent.startswith(sorted_items[i][0]): + sorted_items.insert(i + 1, (' ' + child, values)) + + def get_sorted_events(self, stats): + """ separate parent and child events """ + if self._sorting == SORT_DEFAULT: + def sortkey(pair): + # sort by (delta value, overall value) + v = pair[1] + return (v.delta, v.value) + else: + def sortkey(pair): + # sort by overall value + v = pair[1] + return v.value + + childs = [] + sorted_items = [] + # we can't rule out child events to appear prior to parents even + # when sorted - separate out all children first, and add in later + for key, values in sorted(stats.items(), key=sortkey, + reverse=True): + if values == (0, 0): + continue + if key.find(' ') != -1: + if not self.stats.child_events: + continue + childs.insert(0, (key, values)) + else: + sorted_items.append((key, values)) + if self.stats.child_events: + for key, values in childs: + (child, parent) = key.split(' ') + insert_child(sorted_items, child, values, parent) + + return sorted_items + + if not self._is_running_guest(self.stats.pid_filter): + if self._gname: + try: # ...to identify the guest by name in case it's back + pids = self.get_pid_from_gname(self._gname) + if len(pids) == 1: + self._refresh_header(pids[0]) + self._update_pid(pids[0]) + return + except: + pass + self._display_guest_dead() + # leave final data on screen + return + row = 3 + self.screen.move(row, 0) + self.screen.clrtobot() + stats = self.stats.get(self._display_guests) + total = 0. + ctotal = 0. 
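+        # First pass: 'total' sums parent (or per-guest) event counts and
+        # 'ctotal' sums child events; these drive the %Total column below.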
+ for key, values in stats.items(): + if self._display_guests: + if self.get_gname_from_pid(key): + total += values.value + continue + if not key.find(' ') != -1: + total += values.value + else: + ctotal += values.value + if total == 0.: + # we don't have any fields, or all non-child events are filtered + total = ctotal + + # print events + tavg = 0 + tcur = 0 + guest_removed = False + for key, values in get_sorted_events(self, stats): + if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0): + break + if self._display_guests: + key = self.get_gname_from_pid(key) + if not key: + continue + cur = int(round(values.delta / sleeptime)) if values.delta else 0 + if cur < 0: + guest_removed = True + continue + if key[0] != ' ': + if values.delta: + tcur += values.delta + ptotal = values.value + ltotal = total + else: + ltotal = ptotal + self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' % (key, + values.value, + values.value * 100 / float(ltotal), cur)) + row += 1 + if row == 3: + if guest_removed: + self.screen.addstr(4, 1, 'Guest removed, updating...') + else: + self.screen.addstr(4, 1, 'No matching events reported yet') + if row > 4: + tavg = int(round(tcur / sleeptime)) if tcur > 0 else '' + self.screen.addstr(row, 1, '%-40s %10d %8s' % + ('Total', total, tavg), curses.A_BOLD) + self.screen.refresh() + + def _display_guest_dead(self): + marker = ' Guest is DEAD ' + y = min(len(self._headline), 80 - len(marker)) + self.screen.addstr(0, y, marker, curses.A_BLINK | curses.A_STANDOUT) + + def _show_msg(self, text): + """Display message centered text and exit on key press""" + hint = 'Press any key to continue' + curses.cbreak() + self.screen.erase() + (x, term_width) = self.screen.getmaxyx() + row = 2 + for line in text: + start = (term_width - len(line)) // 2 + self.screen.addstr(row, start, line) + row += 1 + self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint, + curses.A_STANDOUT) + self.screen.getkey() + + def _show_help_interactive(self): + """Display help with list of interactive commands""" + msg = (' b toggle events by guests (debugfs only, honors' + ' filters)', + ' c clear filter', + ' f filter by regular expression', + ' g filter by guest name/PID', + ' h display interactive commands reference', + ' o toggle sorting order (Total vs CurAvg/s)', + ' p filter by guest name/PID', + ' q quit', + ' r reset stats', + ' s set delay between refreshs (value range: ' + '%s-%s secs)' % (MIN_DELAY, MAX_DELAY), + ' x toggle reporting of stats for individual child trace' + ' events', + 'Any other key refreshes statistics immediately') + curses.cbreak() + self.screen.erase() + self.screen.addstr(0, 0, "Interactive commands reference", + curses.A_BOLD) + self.screen.addstr(2, 0, "Press any key to exit", curses.A_STANDOUT) + row = 4 + for line in msg: + self.screen.addstr(row, 0, line) + row += 1 + self.screen.getkey() + self._refresh_header() + + def _show_filter_selection(self): + """Draws filter selection mask. + + Asks for a valid regex and sets the fields filter accordingly. 
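+        An empty input clears the current filter.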
+ + """ + msg = '' + while True: + self.screen.erase() + self.screen.addstr(0, 0, + "Show statistics for events matching a regex.", + curses.A_BOLD) + self.screen.addstr(2, 0, + "Current regex: {0}" + .format(self.stats.fields_filter)) + self.screen.addstr(5, 0, msg) + self.screen.addstr(3, 0, "New regex: ") + curses.echo() + regex = self.screen.getstr().decode(ENCODING) + curses.noecho() + if len(regex) == 0: + self.stats.fields_filter = '' + self._refresh_header() + return + try: + re.compile(regex) + self.stats.fields_filter = regex + self._refresh_header() + return + except re.error: + msg = '"' + regex + '": Not a valid regular expression' + continue + + def _show_set_update_interval(self): + """Draws update interval selection mask.""" + msg = '' + while True: + self.screen.erase() + self.screen.addstr(0, 0, 'Set update interval (defaults to %.1fs).' + % DELAY_DEFAULT, curses.A_BOLD) + self.screen.addstr(4, 0, msg) + self.screen.addstr(2, 0, 'Change delay from %.1fs to ' % + self._delay_regular) + curses.echo() + val = self.screen.getstr().decode(ENCODING) + curses.noecho() + + try: + if len(val) > 0: + delay = float(val) + err = is_delay_valid(delay) + if err is not None: + msg = err + continue + else: + delay = DELAY_DEFAULT + self._delay_regular = delay + break + + except ValueError: + msg = '"' + str(val) + '": Invalid value' + self._refresh_header() + + def _is_running_guest(self, pid): + """Check if pid is still a running process.""" + if not pid: + return True + return os.path.isdir(os.path.join('/proc/', str(pid))) + + def _show_vm_selection_by_guest(self): + """Draws guest selection mask. + + Asks for a guest name or pid until a valid guest name or '' is entered. + + """ + msg = '' + while True: + self.screen.erase() + self.screen.addstr(0, 0, + 'Show statistics for specific guest or pid.', + curses.A_BOLD) + self.screen.addstr(1, 0, + 'This might limit the shown data to the trace ' + 'statistics.') + self.screen.addstr(5, 0, msg) + self._print_all_gnames(7) + curses.echo() + curses.curs_set(1) + self.screen.addstr(3, 0, "Guest or pid [ENTER exits]: ") + guest = self.screen.getstr().decode(ENCODING) + curses.noecho() + + pid = 0 + if not guest or guest == '0': + break + if guest.isdigit(): + if not self._is_running_guest(guest): + msg = '"' + guest + '": Not a running process' + continue + pid = int(guest) + break + pids = [] + try: + pids = self.get_pid_from_gname(guest) + except: + msg = '"' + guest + '": Internal error while searching, ' \ + 'use pid filter instead' + continue + if len(pids) == 0: + msg = '"' + guest + '": Not an active guest' + continue + if len(pids) > 1: + msg = '"' + guest + '": Multiple matches found, use pid ' \ + 'filter instead' + continue + pid = pids[0] + break + curses.curs_set(0) + self._refresh_header(pid) + self._update_pid(pid) + + def show_stats(self): + """Refreshes the screen and processes user input.""" + sleeptime = self._delay_initial + self._refresh_header() + start = 0.0 # result based on init value never appears on screen + while True: + self._refresh_body(time.time() - start) + curses.halfdelay(int(sleeptime * 10)) + start = time.time() + sleeptime = self._delay_regular + try: + char = self.screen.getkey() + if char == 'b': + self._display_guests = not self._display_guests + if self.stats.toggle_display_guests(self._display_guests): + self._show_msg(['Command not available with ' + 'tracepoints enabled', 'Restart with ' + 'debugfs only (see option \'-d\') and ' + 'try again!']) + self._display_guests = not self._display_guests + 
self._refresh_header() + if char == 'c': + self.stats.fields_filter = '' + self._refresh_header(0) + self._update_pid(0) + if char == 'f': + curses.curs_set(1) + self._show_filter_selection() + curses.curs_set(0) + sleeptime = self._delay_initial + if char == 'g' or char == 'p': + self._show_vm_selection_by_guest() + sleeptime = self._delay_initial + if char == 'h': + self._show_help_interactive() + if char == 'o': + self._sorting = not self._sorting + if char == 'q': + break + if char == 'r': + self.stats.reset() + if char == 's': + curses.curs_set(1) + self._show_set_update_interval() + curses.curs_set(0) + sleeptime = self._delay_initial + if char == 'x': + self.stats.child_events = not self.stats.child_events + except KeyboardInterrupt: + break + except curses.error: + continue + + +def batch(stats): + """Prints statistics in a key, value format.""" + try: + s = stats.get() + time.sleep(1) + s = stats.get() + for key, values in sorted(s.items()): + print('%-42s%10d%10d' % (key.split(' ')[0], values.value, + values.delta)) + except KeyboardInterrupt: + pass + + +class StdFormat(object): + def __init__(self, keys): + self._banner = '' + for key in keys: + self._banner += key.split(' ')[0] + ' ' + + def get_banner(self): + return self._banner + + def get_statline(self, keys, s): + res = '' + for key in keys: + res += ' %9d' % s[key].delta + return res + + +class CSVFormat(object): + def __init__(self, keys): + self._banner = 'timestamp' + self._banner += reduce(lambda res, key: "{},{!s}".format(res, + key.split(' ')[0]), keys, '') + + def get_banner(self): + return self._banner + + def get_statline(self, keys, s): + return reduce(lambda res, key: "{},{!s}".format(res, s[key].delta), + keys, '') + + +def log(stats, opts, frmt, keys): + """Prints statistics as reiterating key block, multiple value blocks.""" + global signal_received + line = 0 + banner_repeat = 20 + f = None + + def do_banner(opts): + nonlocal f + if opts.log_to_file: + if not f: + try: + f = open(opts.log_to_file, 'a') + except (IOError, OSError): + sys.exit("Error: Could not open file: %s" % + opts.log_to_file) + if isinstance(frmt, CSVFormat) and f.tell() != 0: + return + print(frmt.get_banner(), file=f or sys.stdout) + + def do_statline(opts, values): + statline = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + \ + frmt.get_statline(keys, values) + print(statline, file=f or sys.stdout) + + do_banner(opts) + banner_printed = True + while True: + try: + time.sleep(opts.set_delay) + if signal_received: + banner_printed = True + line = 0 + f.close() + do_banner(opts) + signal_received = False + if (line % banner_repeat == 0 and not banner_printed and + not (opts.log_to_file and isinstance(frmt, CSVFormat))): + do_banner(opts) + banner_printed = True + values = stats.get() + if (not opts.skip_zero_records or + any(values[k].delta != 0 for k in keys)): + do_statline(opts, values) + line += 1 + banner_printed = False + except KeyboardInterrupt: + break + + if opts.log_to_file: + f.close() + + +def handle_signal(sig, frame): + global signal_received + + signal_received = True + + return + + +def is_delay_valid(delay): + """Verify delay is in valid value range.""" + msg = None + if delay < MIN_DELAY: + msg = '"' + str(delay) + '": Delay must be >=%s' % MIN_DELAY + if delay > MAX_DELAY: + msg = '"' + str(delay) + '": Delay must be <=%s' % MAX_DELAY + return msg + + +def get_options(): + """Returns processed program arguments.""" + description_text = """ +This script displays various statistics about VMs running under KVM. 
+The statistics are gathered from the KVM debugfs entries and / or the +currently available perf traces. + +The monitoring takes additional cpu cycles and might affect the VM's +performance. + +Requirements: +- Access to: + %s + %s/events/* + /proc/pid/task +- /proc/sys/kernel/perf_event_paranoid < 1 if user has no + CAP_SYS_ADMIN and perf events are used. +- CAP_SYS_RESOURCE if the hard limit is not high enough to allow + the large number of files that are possibly opened. + +Interactive Commands: + b toggle events by guests (debugfs only, honors filters) + c clear filter + f filter by regular expression + g filter by guest name + h display interactive commands reference + o toggle sorting order (Total vs CurAvg/s) + p filter by PID + q quit + r reset stats + s set update interval (value range: 0.1-25.5 secs) + x toggle reporting of stats for individual child trace events +Press any other key to refresh statistics immediately. +""" % (PATH_DEBUGFS_KVM, PATH_DEBUGFS_TRACING) + + class Guest_to_pid(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + try: + pids = Tui.get_pid_from_gname(values) + except: + sys.exit('Error while searching for guest "{}". Use "-p" to ' + 'specify a pid instead?'.format(values)) + if len(pids) == 0: + sys.exit('Error: No guest by the name "{}" found' + .format(values)) + if len(pids) > 1: + sys.exit('Error: Multiple processes found (pids: {}). Use "-p"' + ' to specify the desired pid'.format(" ".join(pids))) + namespace.pid = pids[0] + + argparser = argparse.ArgumentParser(description=description_text, + formatter_class=argparse + .RawTextHelpFormatter) + argparser.add_argument('-1', '--once', '--batch', + action='store_true', + default=False, + help='run in batch mode for one second', + ) + argparser.add_argument('-c', '--csv', + action='store_true', + default=False, + help='log in csv format - requires option -l/-L', + ) + argparser.add_argument('-d', '--debugfs', + action='store_true', + default=False, + help='retrieve statistics from debugfs', + ) + argparser.add_argument('-f', '--fields', + default='', + help='''fields to display (regex) +"-f help" for a list of available events''', + ) + argparser.add_argument('-g', '--guest', + type=str, + help='restrict statistics to guest by name', + action=Guest_to_pid, + ) + argparser.add_argument('-i', '--debugfs-include-past', + action='store_true', + default=False, + help='include all available data on past events for' + ' debugfs', + ) + argparser.add_argument('-l', '--log', + action='store_true', + default=False, + help='run in logging mode (like vmstat)', + ) + argparser.add_argument('-L', '--log-to-file', + type=str, + metavar='FILE', + help="like '--log', but logging to a file" + ) + argparser.add_argument('-p', '--pid', + type=int, + default=0, + help='restrict statistics to pid', + ) + argparser.add_argument('-s', '--set-delay', + type=float, + default=DELAY_DEFAULT, + metavar='DELAY', + help='set delay between refreshs (value range: ' + '%s-%s secs)' % (MIN_DELAY, MAX_DELAY), + ) + argparser.add_argument('-t', '--tracepoints', + action='store_true', + default=False, + help='retrieve statistics from tracepoints', + ) + argparser.add_argument('-z', '--skip-zero-records', + action='store_true', + default=False, + help='omit records with all zeros in logging mode', + ) + options = argparser.parse_args() + if options.csv and not (options.log or options.log_to_file): + sys.exit('Error: Option -c/--csv requires -l/--log') + if options.skip_zero_records and not (options.log or 
options.log_to_file): + sys.exit('Error: Option -z/--skip-zero-records requires -l/-L') + try: + # verify that we were passed a valid regex up front + re.compile(options.fields) + except re.error: + sys.exit('Error: "' + options.fields + '" is not a valid regular ' + 'expression') + + return options + + +def check_access(options): + """Exits if the current user can't access all needed directories.""" + if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints or + not options.debugfs): + sys.stderr.write("Please enable CONFIG_TRACING in your kernel " + "when using the option -t (default).\n" + "If it is enabled, make {0} readable by the " + "current user.\n" + .format(PATH_DEBUGFS_TRACING)) + if options.tracepoints: + sys.exit(1) + + sys.stderr.write("Falling back to debugfs statistics!\n") + options.debugfs = True + time.sleep(5) + + return options + + +def assign_globals(): + global PATH_DEBUGFS_KVM + global PATH_DEBUGFS_TRACING + + debugfs = '' + for line in open('/proc/mounts'): + if line.split(' ')[0] == 'debugfs': + debugfs = line.split(' ')[1] + break + if debugfs == '': + sys.stderr.write("Please make sure that CONFIG_DEBUG_FS is enabled in " + "your kernel, mounted and\nreadable by the current " + "user:\n" + "('mount -t debugfs debugfs /sys/kernel/debug')\n") + sys.exit(1) + + PATH_DEBUGFS_KVM = os.path.join(debugfs, 'kvm') + PATH_DEBUGFS_TRACING = os.path.join(debugfs, 'tracing') + + if not os.path.exists(PATH_DEBUGFS_KVM): + sys.stderr.write("Please make sure that CONFIG_KVM is enabled in " + "your kernel and that the modules are loaded.\n") + sys.exit(1) + + +def main(): + assign_globals() + options = get_options() + options = check_access(options) + + if (options.pid > 0 and + not os.path.isdir(os.path.join('/proc/', + str(options.pid)))): + sys.stderr.write('Did you use a (unsupported) tid instead of a pid?\n') + sys.exit('Specified pid does not exist.') + + err = is_delay_valid(options.set_delay) + if err is not None: + sys.exit('Error: ' + err) + + stats = Stats(options) + + if options.fields == 'help': + stats.fields_filter = None + event_list = [] + for key in stats.get().keys(): + event_list.append(key.split('(', 1)[0]) + sys.stdout.write(' ' + '\n '.join(sorted(set(event_list))) + '\n') + sys.exit(0) + + if options.log or options.log_to_file: + if options.log_to_file: + signal.signal(signal.SIGHUP, handle_signal) + keys = sorted(stats.get().keys()) + if options.csv: + frmt = CSVFormat(keys) + else: + frmt = StdFormat(keys) + log(stats, options, frmt, keys) + elif not options.once: + with Tui(stats, options) as tui: + tui.show_stats() + else: + batch(stats) + + +if __name__ == "__main__": + main() diff --git a/data/linux-small/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py b/data/linux-small/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py new file mode 100755 index 0000000..21a7a12 --- /dev/null +++ b/data/linux-small/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py @@ -0,0 +1,97 @@ +# EventClass.py +# SPDX-License-Identifier: GPL-2.0 +# +# This is a library defining some events types classes, which could +# be used by other scripts to analyzing the perf samples. +# +# Currently there are just a few classes defined for examples, +# PerfEvent is the base class for all perf event sample, PebsEvent +# is a HW base Intel x86 PEBS event, and user could add more SW/HW +# event classes based on requirements. 
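+#
+# Illustrative use (editor's sketch, not part of the original library):
+# create_event() below picks the event class from the raw buffer length
+# alone, e.g.
+#
+#     ev = create_event("cycles", "qemu", "dso", "sym", b'\x00' * 144)
+#     ev.show()    # a PebsEvent, since 144 bytes is a plain PEBS record
+#
+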
+from __future__ import print_function
+
+import struct
+
+# Event types, user could add more here
+EVTYPE_GENERIC = 0
+EVTYPE_PEBS = 1 # Basic PEBS event
+EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
+EVTYPE_IBS = 3
+
+#
+# Currently we don't have good way to tell the event type, but by
+# the size of raw buffer, raw PEBS event with load latency data's
+# size is 176 bytes, while the pure PEBS event's size is 144 bytes.
+#
+def create_event(name, comm, dso, symbol, raw_buf):
+    if (len(raw_buf) == 144):
+        event = PebsEvent(name, comm, dso, symbol, raw_buf)
+    elif (len(raw_buf) == 176):
+        event = PebsNHM(name, comm, dso, symbol, raw_buf)
+    else:
+        event = PerfEvent(name, comm, dso, symbol, raw_buf)
+
+    return event
+
+class PerfEvent(object):
+    event_num = 0
+    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
+        self.name = name
+        self.comm = comm
+        self.dso = dso
+        self.symbol = symbol
+        self.raw_buf = raw_buf
+        self.ev_type = ev_type
+        PerfEvent.event_num += 1
+
+    def show(self):
+        print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" %
+              (self.name, self.symbol, self.comm, self.dso))
+
+#
+# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
+# contains the context info when that event happened: the EFLAGS and
+# linear IP info, as well as all the registers.
+#
+class PebsEvent(PerfEvent):
+    pebs_num = 0
+    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
+        tmp_buf = raw_buf[0:80]
+        flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
+        self.flags = flags
+        self.ip = ip
+        self.ax = ax
+        self.bx = bx
+        self.cx = cx
+        self.dx = dx
+        self.si = si
+        self.di = di
+        self.bp = bp
+        self.sp = sp
+
+        PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
+        PebsEvent.pebs_num += 1
+        del tmp_buf
+
+#
+# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
+# in the four 64 bit words right after the PEBS data:
+# Status: records the IA32_PERF_GLOBAL_STATUS register value
+# DLA: Data Linear Address (EIP)
+# DSE: Data Source Encoding, where the latency happens, hit or miss
+# in L1/L2/L3 or IO operations
+# LAT: the actual latency in cycles
+#
+class PebsNHM(PebsEvent):
+    pebs_nhm_num = 0
+    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
+        tmp_buf = raw_buf[144:176]
+        status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
+        self.status = status
+        self.dla = dla
+        self.dse = dse
+        self.lat = lat
+
+        PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
+        PebsNHM.pebs_nhm_num += 1
+        del tmp_buf
diff --git a/data/linux-small/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/data/linux-small/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
new file mode 100644
index 0000000..7384dcb
--- /dev/null
+++ b/data/linux-small/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
@@ -0,0 +1,91 @@
+# Util.py - Python extension for perf script, miscellaneous utility code
+#
+# Copyright (C) 2010 by Tom Zanussi
+#
+# This software may be distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
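+#
+# Quick reference (editor's addition): the nsecs* helpers below convert
+# (secs, nsecs) pairs to and from flat nanosecond counts, e.g.
+#     nsecs(1, 500000000) == 1500000000
+# add_stats() tracks a (min, max, smoothed average, count) tuple per key.
+#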
+from __future__ import print_function
+
+import errno, os
+
+FUTEX_WAIT = 0
+FUTEX_WAKE = 1
+FUTEX_PRIVATE_FLAG = 128
+FUTEX_CLOCK_REALTIME = 256
+FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
+
+NSECS_PER_SEC = 1000000000
+
+def avg(total, n):
+    return total / n
+
+def nsecs(secs, nsecs):
+    return secs * NSECS_PER_SEC + nsecs
+
+def nsecs_secs(nsecs):
+    return nsecs / NSECS_PER_SEC
+
+def nsecs_nsecs(nsecs):
+    return nsecs % NSECS_PER_SEC
+
+def nsecs_str(nsecs):
+    str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
+    return str
+
+def add_stats(dict, key, value):
+    if key not in dict:
+        dict[key] = (value, value, value, 1)
+    else:
+        min, max, avg, count = dict[key]
+        if value < min:
+            min = value
+        if value > max:
+            max = value
+        avg = (avg + value) / 2
+        dict[key] = (min, max, avg, count + 1)
+
+def clear_term():
+    print("\x1b[H\x1b[2J")
+
+audit_package_warned = False
+
+try:
+    import audit
+    machine_to_id = {
+        'x86_64': audit.MACH_86_64,
+        'alpha' : audit.MACH_ALPHA,
+        'ia64' : audit.MACH_IA64,
+        'ppc' : audit.MACH_PPC,
+        'ppc64' : audit.MACH_PPC64,
+        'ppc64le' : audit.MACH_PPC64LE,
+        's390' : audit.MACH_S390,
+        's390x' : audit.MACH_S390X,
+        'i386' : audit.MACH_X86,
+        'i586' : audit.MACH_X86,
+        'i686' : audit.MACH_X86,
+    }
+    try:
+        machine_to_id['armeb'] = audit.MACH_ARMEB
+    except:
+        pass
+    machine_id = machine_to_id[os.uname()[4]]
+except:
+    if not audit_package_warned:
+        audit_package_warned = True
+        print("Install the audit-libs-python package to get syscall names.\n"
+              "For example:\n # apt-get install python-audit (Ubuntu)"
+              "\n # yum install audit-libs-python (Fedora)"
+              "\n etc.\n")
+
+def syscall_name(id):
+    try:
+        return audit.audit_syscall_to_name(id, machine_id)
+    except:
+        return str(id)
+
+def strerror(nr):
+    try:
+        return errno.errorcode[abs(nr)]
+    except:
+        return "Unknown %d errno" % nr
diff --git a/data/linux-small/tools/perf/scripts/python/check-perf-trace.py b/data/linux-small/tools/perf/scripts/python/check-perf-trace.py
new file mode 100644
index 0000000..d2c2295
--- /dev/null
+++ b/data/linux-small/tools/perf/scripts/python/check-perf-trace.py
@@ -0,0 +1,84 @@
+# perf script event handlers, generated by perf script -g python
+# (c) 2010, Tom Zanussi
+# Licensed under the terms of the GNU GPL License version 2
+#
+# This script tests basic functionality such as flag and symbol
+# strings, common_xxx() calls back into perf, begin, end, unhandled
+# events, etc. Basically, if this script runs successfully and
+# displays expected results, Python scripting support should be ok.
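+#
+# Editor's note: perf dispatches each tracepoint to a Python handler named
+# <subsystem>__<event_name> (e.g. irq__softirq_entry below); the common_*
+# fields always precede the event-specific arguments.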
+ +from __future__ import print_function + +import os +import sys + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from Core import * +from perf_trace_context import * + +unhandled = autodict() + +def trace_begin(): + print("trace_begin") + pass + +def trace_end(): + print_unhandled() + +def irq__softirq_entry(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, vec): + print_header(event_name, common_cpu, common_secs, common_nsecs, + common_pid, common_comm) + + print_uncommon(context) + + print("vec=%s" % (symbol_str("irq__softirq_entry", "vec", vec))) + +def kmem__kmalloc(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, call_site, ptr, bytes_req, bytes_alloc, + gfp_flags): + print_header(event_name, common_cpu, common_secs, common_nsecs, + common_pid, common_comm) + + print_uncommon(context) + + print("call_site=%u, ptr=%u, bytes_req=%u, " + "bytes_alloc=%u, gfp_flags=%s" % + (call_site, ptr, bytes_req, bytes_alloc, + flag_str("kmem__kmalloc", "gfp_flags", gfp_flags))) + +def trace_unhandled(event_name, context, event_fields_dict): + try: + unhandled[event_name] += 1 + except TypeError: + unhandled[event_name] = 1 + +def print_header(event_name, cpu, secs, nsecs, pid, comm): + print("%-20s %5u %05u.%09u %8u %-20s " % + (event_name, cpu, secs, nsecs, pid, comm), + end=' ') + +# print trace fields not included in handler args +def print_uncommon(context): + print("common_preempt_count=%d, common_flags=%s, " + "common_lock_depth=%d, " % + (common_pc(context), trace_flag_str(common_flags(context)), + common_lock_depth(context))) + +def print_unhandled(): + keys = unhandled.keys() + if not keys: + return + + print("\nunhandled events:\n") + + print("%-40s %10s" % ("event", "count")) + print("%-40s %10s" % ("----------------------------------------", + "-----------")) + + for event_name in keys: + print("%-40s %10d\n" % (event_name, unhandled[event_name])) diff --git a/data/linux-small/tools/perf/scripts/python/compaction-times.py b/data/linux-small/tools/perf/scripts/python/compaction-times.py new file mode 100644 index 0000000..2560a04 --- /dev/null +++ b/data/linux-small/tools/perf/scripts/python/compaction-times.py @@ -0,0 +1,311 @@ +# report time spent in compaction +# Licensed under the terms of the GNU GPL License version 2 + +# testing: +# 'echo 1 > /proc/sys/vm/compact_memory' to force compaction of all zones + +import os +import sys +import re + +import signal +signal.signal(signal.SIGPIPE, signal.SIG_DFL) + +usage = "usage: perf script report compaction-times.py -- [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]\n" + +class popt: + DISP_DFL = 0 + DISP_PROC = 1 + DISP_PROC_VERBOSE=2 + +class topt: + DISP_TIME = 0 + DISP_MIG = 1 + DISP_ISOLFREE = 2 + DISP_ISOLMIG = 4 + DISP_ALL = 7 + +class comm_filter: + def __init__(self, re): + self.re = re + + def filter(self, pid, comm): + m = self.re.search(comm) + return m == None or m.group() == "" + +class pid_filter: + def __init__(self, low, high): + self.low = (0 if low == "" else int(low)) + self.high = (0 if high == "" else int(high)) + + def filter(self, pid, comm): + return not (pid >= self.low and (self.high == 0 or pid <= self.high)) + +def set_type(t): + global opt_disp + opt_disp = (t if opt_disp == topt.DISP_ALL else opt_disp|t) + +def ns(sec, nsec): + return (sec * 1000000000) + nsec + +def time(ns): + return "%dns" % ns if opt_ns else 
"%dus" % (round(ns, -3) / 1000) + +class pair: + def __init__(self, aval, bval, alabel = None, blabel = None): + self.alabel = alabel + self.blabel = blabel + self.aval = aval + self.bval = bval + + def __add__(self, rhs): + self.aval += rhs.aval + self.bval += rhs.bval + return self + + def __str__(self): + return "%s=%d %s=%d" % (self.alabel, self.aval, self.blabel, self.bval) + +class cnode: + def __init__(self, ns): + self.ns = ns + self.migrated = pair(0, 0, "moved", "failed") + self.fscan = pair(0,0, "scanned", "isolated") + self.mscan = pair(0,0, "scanned", "isolated") + + def __add__(self, rhs): + self.ns += rhs.ns + self.migrated += rhs.migrated + self.fscan += rhs.fscan + self.mscan += rhs.mscan + return self + + def __str__(self): + prev = 0 + s = "%s " % time(self.ns) + if (opt_disp & topt.DISP_MIG): + s += "migration: %s" % self.migrated + prev = 1 + if (opt_disp & topt.DISP_ISOLFREE): + s += "%sfree_scanner: %s" % (" " if prev else "", self.fscan) + prev = 1 + if (opt_disp & topt.DISP_ISOLMIG): + s += "%smigration_scanner: %s" % (" " if prev else "", self.mscan) + return s + + def complete(self, secs, nsecs): + self.ns = ns(secs, nsecs) - self.ns + + def increment(self, migrated, fscan, mscan): + if (migrated != None): + self.migrated += migrated + if (fscan != None): + self.fscan += fscan + if (mscan != None): + self.mscan += mscan + + +class chead: + heads = {} + val = cnode(0); + fobj = None + + @classmethod + def add_filter(cls, filter): + cls.fobj = filter + + @classmethod + def create_pending(cls, pid, comm, start_secs, start_nsecs): + filtered = 0 + try: + head = cls.heads[pid] + filtered = head.is_filtered() + except KeyError: + if cls.fobj != None: + filtered = cls.fobj.filter(pid, comm) + head = cls.heads[pid] = chead(comm, pid, filtered) + + if not filtered: + head.mark_pending(start_secs, start_nsecs) + + @classmethod + def increment_pending(cls, pid, migrated, fscan, mscan): + head = cls.heads[pid] + if not head.is_filtered(): + if head.is_pending(): + head.do_increment(migrated, fscan, mscan) + else: + sys.stderr.write("missing start compaction event for pid %d\n" % pid) + + @classmethod + def complete_pending(cls, pid, secs, nsecs): + head = cls.heads[pid] + if not head.is_filtered(): + if head.is_pending(): + head.make_complete(secs, nsecs) + else: + sys.stderr.write("missing start compaction event for pid %d\n" % pid) + + @classmethod + def gen(cls): + if opt_proc != popt.DISP_DFL: + for i in cls.heads: + yield cls.heads[i] + + @classmethod + def str(cls): + return cls.val + + def __init__(self, comm, pid, filtered): + self.comm = comm + self.pid = pid + self.val = cnode(0) + self.pending = None + self.filtered = filtered + self.list = [] + + def __add__(self, rhs): + self.ns += rhs.ns + self.val += rhs.val + return self + + def mark_pending(self, secs, nsecs): + self.pending = cnode(ns(secs, nsecs)) + + def do_increment(self, migrated, fscan, mscan): + self.pending.increment(migrated, fscan, mscan) + + def make_complete(self, secs, nsecs): + self.pending.complete(secs, nsecs) + chead.val += self.pending + + if opt_proc != popt.DISP_DFL: + self.val += self.pending + + if opt_proc == popt.DISP_PROC_VERBOSE: + self.list.append(self.pending) + self.pending = None + + def enumerate(self): + if opt_proc == popt.DISP_PROC_VERBOSE and not self.is_filtered(): + for i, pelem in enumerate(self.list): + sys.stdout.write("%d[%s].%d: %s\n" % (self.pid, self.comm, i+1, pelem)) + + def is_pending(self): + return self.pending != None + + def is_filtered(self): + return 
self.filtered + + def display(self): + if not self.is_filtered(): + sys.stdout.write("%d[%s]: %s\n" % (self.pid, self.comm, self.val)) + + +def trace_end(): + sys.stdout.write("total: %s\n" % chead.str()) + for i in chead.gen(): + i.display(), + i.enumerate() + +def compaction__mm_compaction_migratepages(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, nr_migrated, nr_failed): + + chead.increment_pending(common_pid, + pair(nr_migrated, nr_failed), None, None) + +def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken): + + chead.increment_pending(common_pid, + None, pair(nr_scanned, nr_taken), None) + +def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken): + + chead.increment_pending(common_pid, + None, None, pair(nr_scanned, nr_taken)) + +def compaction__mm_compaction_end(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, zone_start, migrate_start, free_start, zone_end, + sync, status): + + chead.complete_pending(common_pid, common_secs, common_nsecs) + +def compaction__mm_compaction_begin(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, zone_start, migrate_start, free_start, zone_end, + sync): + + chead.create_pending(common_pid, common_comm, common_secs, common_nsecs) + +def pr_help(): + global usage + + sys.stdout.write(usage) + sys.stdout.write("\n") + sys.stdout.write("-h display this help\n") + sys.stdout.write("-p display by process\n") + sys.stdout.write("-pv display by process (verbose)\n") + sys.stdout.write("-t display stall times only\n") + sys.stdout.write("-m display stats for migration\n") + sys.stdout.write("-fs display stats for free scanner\n") + sys.stdout.write("-ms display stats for migration scanner\n") + sys.stdout.write("-u display results in microseconds (default nanoseconds)\n") + + +comm_re = None +pid_re = None +pid_regex = "^(\d*)-(\d*)$|^(\d*)$" + +opt_proc = popt.DISP_DFL +opt_disp = topt.DISP_ALL + +opt_ns = True + +argc = len(sys.argv) - 1 +if argc >= 1: + pid_re = re.compile(pid_regex) + + for i, opt in enumerate(sys.argv[1:]): + if opt[0] == "-": + if opt == "-h": + pr_help() + exit(0); + elif opt == "-p": + opt_proc = popt.DISP_PROC + elif opt == "-pv": + opt_proc = popt.DISP_PROC_VERBOSE + elif opt == '-u': + opt_ns = False + elif opt == "-t": + set_type(topt.DISP_TIME) + elif opt == "-m": + set_type(topt.DISP_MIG) + elif opt == "-fs": + set_type(topt.DISP_ISOLFREE) + elif opt == "-ms": + set_type(topt.DISP_ISOLMIG) + else: + sys.exit(usage) + + elif i == argc - 1: + m = pid_re.search(opt) + if m != None and m.group() != "": + if m.group(3) != None: + f = pid_filter(m.group(3), m.group(3)) + else: + f = pid_filter(m.group(1), m.group(2)) + else: + try: + comm_re=re.compile(opt) + except: + sys.stderr.write("invalid regex '%s'" % opt) + sys.exit(usage) + f = comm_filter(comm_re) + + chead.add_filter(f) diff --git a/data/linux-small/tools/perf/scripts/python/event_analyzing_sample.py b/data/linux-small/tools/perf/scripts/python/event_analyzing_sample.py new file mode 100644 index 0000000..aa1e2cf --- /dev/null +++ b/data/linux-small/tools/perf/scripts/python/event_analyzing_sample.py @@ -0,0 +1,192 @@ +# 
event_analyzing_sample.py: general event handler in python +# SPDX-License-Identifier: GPL-2.0 +# +# Current perf report is already very powerful with the annotation integrated, +# and this script is not trying to be as powerful as perf report, but +# providing end user/developer a flexible way to analyze the events other +# than trace points. +# +# The 2 database related functions in this script just show how to gather +# the basic information, and users can modify and write their own functions +# according to their specific requirement. +# +# The first function "show_general_events" just does a basic grouping for all +# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is +# for a x86 HW PMU event: PEBS with load latency data. +# + +from __future__ import print_function + +import os +import sys +import math +import struct +import sqlite3 + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from perf_trace_context import * +from EventClass import * + +# +# If the perf.data has a big number of samples, then the insert operation +# will be very time consuming (about 10+ minutes for 10000 samples) if the +# .db database is on disk. Move the .db file to RAM based FS to speedup +# the handling, which will cut the time down to several seconds. +# +con = sqlite3.connect("/dev/shm/perf.db") +con.isolation_level = None + +def trace_begin(): + print("In trace_begin:\n") + + # + # Will create several tables at the start, pebs_ll is for PEBS data with + # load latency info, while gen_events is for general event. + # + con.execute(""" + create table if not exists gen_events ( + name text, + symbol text, + comm text, + dso text + );""") + con.execute(""" + create table if not exists pebs_ll ( + name text, + symbol text, + comm text, + dso text, + flags integer, + ip integer, + status integer, + dse integer, + dla integer, + lat integer + );""") + +# +# Create and insert event object to a database so that user could +# do more analysis with simple database commands. +# +def process_event(param_dict): + event_attr = param_dict["attr"] + sample = param_dict["sample"] + raw_buf = param_dict["raw_buf"] + comm = param_dict["comm"] + name = param_dict["ev_name"] + + # Symbol and dso info are not always resolved + if ("dso" in param_dict): + dso = param_dict["dso"] + else: + dso = "Unknown_dso" + + if ("symbol" in param_dict): + symbol = param_dict["symbol"] + else: + symbol = "Unknown_symbol" + + # Create the event object and insert it to the right table in database + event = create_event(name, comm, dso, symbol, raw_buf) + insert_db(event) + +def insert_db(event): + if event.ev_type == EVTYPE_GENERIC: + con.execute("insert into gen_events values(?, ?, ?, ?)", + (event.name, event.symbol, event.comm, event.dso)) + elif event.ev_type == EVTYPE_PEBS_LL: + event.ip &= 0x7fffffffffffffff + event.dla &= 0x7fffffffffffffff + con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + (event.name, event.symbol, event.comm, event.dso, event.flags, + event.ip, event.status, event.dse, event.dla, event.lat)) + +def trace_end(): + print("In trace_end:\n") + # We show the basic info for the 2 type of event classes + show_general_events() + show_pebs_ll() + con.close() + +# +# As the event number may be very big, so we can't use linear way +# to show the histogram in real number, but use a log2 algorithm. 
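+# For example, a count of 1000 gets int(log2(1000) + 1) = 10 '#' characters, +# while a count of 10 gets only 4, so buckets of very different sizes +# remain readable side by side.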
+# + +def num2sym(num): + # Each number will have at least one '#' + snum = '#' * (int)(math.log(num, 2) + 1) + return snum + +def show_general_events(): + + # Check the total record number in the table + count = con.execute("select count(*) from gen_events") + for t in count: + print("There is %d records in gen_events table" % t[0]) + if t[0] == 0: + return + + print("Statistics about the general events grouped by thread/symbol/dso: \n") + + # Group by thread + commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)") + print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)) + for row in commq: + print("%16s %8d %s" % (row[0], row[1], num2sym(row[1]))) + + # Group by symbol + print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)) + symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)") + for row in symbolq: + print("%32s %8d %s" % (row[0], row[1], num2sym(row[1]))) + + # Group by dso + print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)) + dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)") + for row in dsoq: + print("%40s %8d %s" % (row[0], row[1], num2sym(row[1]))) + +# +# This function just shows the basic info, and we could do more with the +# data in the tables, like checking the function parameters when some +# big latency events happen. +# +def show_pebs_ll(): + + count = con.execute("select count(*) from pebs_ll") + for t in count: + print("There is %d records in pebs_ll table" % t[0]) + if t[0] == 0: + return + + print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n") + + # Group by thread + commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)") + print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)) + for row in commq: + print("%16s %8d %s" % (row[0], row[1], num2sym(row[1]))) + + # Group by symbol + print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)) + symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)") + for row in symbolq: + print("%32s %8d %s" % (row[0], row[1], num2sym(row[1]))) + + # Group by dse + dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)") + print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)) + for row in dseq: + print("%32s %8d %s" % (row[0], row[1], num2sym(row[1]))) + + # Group by latency + latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat") + print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)) + for row in latq: + print("%32s %8d %s" % (row[0], row[1], num2sym(row[1]))) + +def trace_unhandled(event_name, context, event_fields_dict): + print (' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])) diff --git a/data/linux-small/tools/perf/scripts/python/export-to-postgresql.py b/data/linux-small/tools/perf/scripts/python/export-to-postgresql.py new file mode 100644 index 0000000..d187e46 --- /dev/null +++ b/data/linux-small/tools/perf/scripts/python/export-to-postgresql.py @@ -0,0 +1,1111 @@ +# export-to-postgresql.py: export perf data to a postgresql database +# Copyright (c) 2014, Intel Corporation. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. + +from __future__ import print_function + +import os +import sys +import struct +import datetime + +# To use this script you will need to have installed package python-pyside which +# provides LGPL-licensed Python bindings for Qt. You will also need the package +# libqt4-sql-psql for Qt postgresql support. +# +# The script assumes postgresql is running on the local machine and that the +# user has postgresql permissions to create databases. Examples of installing +# postgresql and adding such a user are: +# +# fedora: +# +# $ sudo yum install postgresql postgresql-server qt-postgresql +# $ sudo su - postgres -c initdb +# $ sudo service postgresql start +# $ sudo su - postgres +# $ createuser -s # Older versions may not support -s, in which case answer the prompt below: +# Shall the new role be a superuser? (y/n) y +# $ sudo yum install python-pyside +# +# Alternately, to use Python3 and/or pyside 2, one of the following: +# $ sudo yum install python3-pyside +# $ pip install --user PySide2 +# $ pip3 install --user PySide2 +# +# ubuntu: +# +# $ sudo apt-get install postgresql +# $ sudo su - postgres +# $ createuser -s +# $ sudo apt-get install python-pyside.qtsql libqt4-sql-psql +# +# Alternately, to use Python3 and/or pyside 2, one of the following: +# +# $ sudo apt-get install python3-pyside.qtsql libqt4-sql-psql +# $ sudo apt-get install python-pyside2.qtsql libqt5sql5-psql +# $ sudo apt-get install python3-pyside2.qtsql libqt5sql5-psql +# +# An example of using this script with Intel PT: +# +# $ perf record -e intel_pt//u ls +# $ perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py pt_example branches calls +# 2015-05-29 12:49:23.464364 Creating database... +# 2015-05-29 12:49:26.281717 Writing to intermediate files... +# 2015-05-29 12:49:27.190383 Copying to database... +# 2015-05-29 12:49:28.140451 Removing intermediate files... +# 2015-05-29 12:49:28.147451 Adding primary keys +# 2015-05-29 12:49:28.655683 Adding foreign keys +# 2015-05-29 12:49:29.365350 Done +# +# To browse the database, psql can be used e.g. +# +# $ psql pt_example +# pt_example=# select * from samples_view where id < 100; +# pt_example=# \d+ +# pt_example=# \d+ samples_view +# pt_example=# \q +# +# An example of using the database is provided by the script +# exported-sql-viewer.py. Refer to that script for details. +# +# Tables: +# +# The tables largely correspond to perf tools' data structures. They are largely self-explanatory. +# +# samples +# +# 'samples' is the main table. It represents what instruction was executing at a point in time +# when something (a selected event) happened. The memory address is the instruction pointer or 'ip'. +# +# calls +# +# 'calls' represents function calls and is related to 'samples' by 'call_id' and 'return_id'. +# 'calls' is only created when the 'calls' option to this script is specified. +# +# call_paths +# +# 'call_paths' represents all the call stacks. Each 'call' has an associated record in 'call_paths'. +# 'calls_paths' is only created when the 'calls' option to this script is specified. 
+# +# branch_types +# +# 'branch_types' provides descriptions for each type of branch. +# +# comm_threads +# +# 'comm_threads' shows how 'comms' relates to 'threads'. +# +# comms +# +# 'comms' contains a record for each 'comm' - the name given to the executable that is running. +# +# dsos +# +# 'dsos' contains a record for each executable file or library. +# +# machines +# +# 'machines' can be used to distinguish virtual machines if virtualization is supported. +# +# selected_events +# +# 'selected_events' contains a record for each kind of event that has been sampled. +# +# symbols +# +# 'symbols' contains a record for each symbol. Only symbols that have samples are present. +# +# threads +# +# 'threads' contains a record for each thread. +# +# Views: +# +# Most of the tables have views for more friendly display. The views are: +# +# calls_view +# call_paths_view +# comm_threads_view +# dsos_view +# machines_view +# samples_view +# symbols_view +# threads_view +# +# More examples of browsing the database with psql: +# Note that some of the examples are not the most optimal SQL query. +# Note that call information is only available if the script's 'calls' option has been used. +# +# Top 10 function calls (not aggregated by symbol): +# +# SELECT * FROM calls_view ORDER BY elapsed_time DESC LIMIT 10; +# +# Top 10 function calls (aggregated by symbol): +# +# SELECT symbol_id,(SELECT name FROM symbols WHERE id = symbol_id) AS symbol, +# SUM(elapsed_time) AS tot_elapsed_time,SUM(branch_count) AS tot_branch_count +# FROM calls_view GROUP BY symbol_id ORDER BY tot_elapsed_time DESC LIMIT 10; +# +# Note that the branch count gives a rough estimation of cpu usage, so functions +# that took a long time but have a relatively low branch count must have spent time +# waiting. +# +# Find symbols by pattern matching on part of the name (e.g. names containing 'alloc'): +# +# SELECT * FROM symbols_view WHERE name LIKE '%alloc%'; +# +# Top 10 function calls for a specific symbol (e.g. whose symbol_id is 187): +# +# SELECT * FROM calls_view WHERE symbol_id = 187 ORDER BY elapsed_time DESC LIMIT 10; +# +# Show function calls made by function in the same context (i.e. same call path) (e.g. one with call_path_id 254): +# +# SELECT * FROM calls_view WHERE parent_call_path_id = 254; +# +# Show branches made during a function call (e.g. where call_id is 29357 and return_id is 29370 and tid is 29670) +# +# SELECT * FROM samples_view WHERE id >= 29357 AND id <= 29370 AND tid = 29670 AND event LIKE 'branches%'; +# +# Show transactions: +# +# SELECT * FROM samples_view WHERE event = 'transactions'; +# +# Note transaction start has 'in_tx' true whereas, transaction end has 'in_tx' false. +# Transaction aborts have branch_type_name 'transaction abort' +# +# Show transaction aborts: +# +# SELECT * FROM samples_view WHERE event = 'transactions' AND branch_type_name = 'transaction abort'; +# +# To print a call stack requires walking the call_paths table. 
For example this python script: +# #!/usr/bin/python2 +# +# import sys +# from PySide.QtSql import * +# +# if __name__ == '__main__': +# if (len(sys.argv) < 3): +# print >> sys.stderr, "Usage is: printcallstack.py <database name> <call path id>" +# raise Exception("Too few arguments") +# dbname = sys.argv[1] +# call_path_id = sys.argv[2] +# db = QSqlDatabase.addDatabase('QPSQL') +# db.setDatabaseName(dbname) +# if not db.open(): +# raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text()) +# query = QSqlQuery(db) +# print " id ip symbol_id symbol dso_id dso_short_name" +# while call_path_id != 0 and call_path_id != 1: +# ret = query.exec_('SELECT * FROM call_paths_view WHERE id = ' + str(call_path_id)) +# if not ret: +# raise Exception("Query failed: " + query.lastError().text()) +# if not query.next(): +# raise Exception("Query failed") +# print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5)) +# call_path_id = query.value(6) + +pyside_version_1 = True +if not "pyside-version-1" in sys.argv: + try: + from PySide2.QtSql import * + pyside_version_1 = False + except: + pass + +if pyside_version_1: + from PySide.QtSql import * + +if sys.version_info < (3, 0): + def toserverstr(str): + return str + def toclientstr(str): + return str +else: + # Assume UTF-8 server_encoding and client_encoding + def toserverstr(str): + return bytes(str, "UTF_8") + def toclientstr(str): + return bytes(str, "UTF_8") + +# Need to access PostgreSQL C library directly to use COPY FROM STDIN +from ctypes import * +libpq = CDLL("libpq.so.5") +PQconnectdb = libpq.PQconnectdb +PQconnectdb.restype = c_void_p +PQconnectdb.argtypes = [ c_char_p ] +PQfinish = libpq.PQfinish +PQfinish.argtypes = [ c_void_p ] +PQstatus = libpq.PQstatus +PQstatus.restype = c_int +PQstatus.argtypes = [ c_void_p ] +PQexec = libpq.PQexec +PQexec.restype = c_void_p +PQexec.argtypes = [ c_void_p, c_char_p ] +PQresultStatus = libpq.PQresultStatus +PQresultStatus.restype = c_int +PQresultStatus.argtypes = [ c_void_p ] +PQputCopyData = libpq.PQputCopyData +PQputCopyData.restype = c_int +PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ] +PQputCopyEnd = libpq.PQputCopyEnd +PQputCopyEnd.restype = c_int +PQputCopyEnd.argtypes = [ c_void_p, c_void_p ] + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +# These perf imports are not used at present +#from perf_trace_context import * +#from Core import * + +perf_db_export_mode = True +perf_db_export_calls = False +perf_db_export_callchains = False + +def printerr(*args, **kw_args): + print(*args, file=sys.stderr, **kw_args) + +def printdate(*args, **kw_args): + print(datetime.datetime.today(), *args, sep=' ', **kw_args) + +def usage(): + printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]"); + printerr("where: columns 'all' or 'branches'"); + printerr(" calls 'calls' => create calls and call_paths table"); + printerr(" callchains 'callchains' => create call_paths table"); + printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1"); + raise Exception("Too few or bad arguments") + +if (len(sys.argv) < 2): + usage() + +dbname = sys.argv[1] + +if (len(sys.argv) >= 3): + columns = sys.argv[2] +else: + columns = "all" + +if columns not in ("all", "branches"): + usage() + +branches = (columns == "branches") + +for i in range(3,len(sys.argv)): + if (sys.argv[i] == "calls"): + perf_db_export_calls = True + elif (sys.argv[i] ==
"callchains"): + perf_db_export_callchains = True + elif (sys.argv[i] == "pyside-version-1"): + pass + else: + usage() + +output_dir_name = os.getcwd() + "/" + dbname + "-perf-data" +os.mkdir(output_dir_name) + +def do_query(q, s): + if (q.exec_(s)): + return + raise Exception("Query failed: " + q.lastError().text()) + +printdate("Creating database...") + +db = QSqlDatabase.addDatabase('QPSQL') +query = QSqlQuery(db) +db.setDatabaseName('postgres') +db.open() +try: + do_query(query, 'CREATE DATABASE ' + dbname) +except: + os.rmdir(output_dir_name) + raise +query.finish() +query.clear() +db.close() + +db.setDatabaseName(dbname) +db.open() + +query = QSqlQuery(db) +do_query(query, 'SET client_min_messages TO WARNING') + +do_query(query, 'CREATE TABLE selected_events (' + 'id bigint NOT NULL,' + 'name varchar(80))') +do_query(query, 'CREATE TABLE machines (' + 'id bigint NOT NULL,' + 'pid integer,' + 'root_dir varchar(4096))') +do_query(query, 'CREATE TABLE threads (' + 'id bigint NOT NULL,' + 'machine_id bigint,' + 'process_id bigint,' + 'pid integer,' + 'tid integer)') +do_query(query, 'CREATE TABLE comms (' + 'id bigint NOT NULL,' + 'comm varchar(16),' + 'c_thread_id bigint,' + 'c_time bigint,' + 'exec_flag boolean)') +do_query(query, 'CREATE TABLE comm_threads (' + 'id bigint NOT NULL,' + 'comm_id bigint,' + 'thread_id bigint)') +do_query(query, 'CREATE TABLE dsos (' + 'id bigint NOT NULL,' + 'machine_id bigint,' + 'short_name varchar(256),' + 'long_name varchar(4096),' + 'build_id varchar(64))') +do_query(query, 'CREATE TABLE symbols (' + 'id bigint NOT NULL,' + 'dso_id bigint,' + 'sym_start bigint,' + 'sym_end bigint,' + 'binding integer,' + 'name varchar(2048))') +do_query(query, 'CREATE TABLE branch_types (' + 'id integer NOT NULL,' + 'name varchar(80))') + +if branches: + do_query(query, 'CREATE TABLE samples (' + 'id bigint NOT NULL,' + 'evsel_id bigint,' + 'machine_id bigint,' + 'thread_id bigint,' + 'comm_id bigint,' + 'dso_id bigint,' + 'symbol_id bigint,' + 'sym_offset bigint,' + 'ip bigint,' + 'time bigint,' + 'cpu integer,' + 'to_dso_id bigint,' + 'to_symbol_id bigint,' + 'to_sym_offset bigint,' + 'to_ip bigint,' + 'branch_type integer,' + 'in_tx boolean,' + 'call_path_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') +else: + do_query(query, 'CREATE TABLE samples (' + 'id bigint NOT NULL,' + 'evsel_id bigint,' + 'machine_id bigint,' + 'thread_id bigint,' + 'comm_id bigint,' + 'dso_id bigint,' + 'symbol_id bigint,' + 'sym_offset bigint,' + 'ip bigint,' + 'time bigint,' + 'cpu integer,' + 'to_dso_id bigint,' + 'to_symbol_id bigint,' + 'to_sym_offset bigint,' + 'to_ip bigint,' + 'period bigint,' + 'weight bigint,' + 'transaction bigint,' + 'data_src bigint,' + 'branch_type integer,' + 'in_tx boolean,' + 'call_path_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') + +if perf_db_export_calls or perf_db_export_callchains: + do_query(query, 'CREATE TABLE call_paths (' + 'id bigint NOT NULL,' + 'parent_id bigint,' + 'symbol_id bigint,' + 'ip bigint)') +if perf_db_export_calls: + do_query(query, 'CREATE TABLE calls (' + 'id bigint NOT NULL,' + 'thread_id bigint,' + 'comm_id bigint,' + 'call_path_id bigint,' + 'call_time bigint,' + 'return_time bigint,' + 'branch_count bigint,' + 'call_id bigint,' + 'return_id bigint,' + 'parent_call_path_id bigint,' + 'flags integer,' + 'parent_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') + +do_query(query, 'CREATE TABLE ptwrite (' + 'id bigint NOT NULL,' + 'payload bigint,' + 'exact_ip boolean)') + +do_query(query, 
'CREATE TABLE cbr (' + 'id bigint NOT NULL,' + 'cbr integer,' + 'mhz integer,' + 'percent integer)') + +do_query(query, 'CREATE TABLE mwait (' + 'id bigint NOT NULL,' + 'hints integer,' + 'extensions integer)') + +do_query(query, 'CREATE TABLE pwre (' + 'id bigint NOT NULL,' + 'cstate integer,' + 'subcstate integer,' + 'hw boolean)') + +do_query(query, 'CREATE TABLE exstop (' + 'id bigint NOT NULL,' + 'exact_ip boolean)') + +do_query(query, 'CREATE TABLE pwrx (' + 'id bigint NOT NULL,' + 'deepest_cstate integer,' + 'last_cstate integer,' + 'wake_reason integer)') + +do_query(query, 'CREATE TABLE context_switches (' + 'id bigint NOT NULL,' + 'machine_id bigint,' + 'time bigint,' + 'cpu integer,' + 'thread_out_id bigint,' + 'comm_out_id bigint,' + 'thread_in_id bigint,' + 'comm_in_id bigint,' + 'flags integer)') + +do_query(query, 'CREATE VIEW machines_view AS ' + 'SELECT ' + 'id,' + 'pid,' + 'root_dir,' + 'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest' + ' FROM machines') + +do_query(query, 'CREATE VIEW dsos_view AS ' + 'SELECT ' + 'id,' + 'machine_id,' + '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' + 'short_name,' + 'long_name,' + 'build_id' + ' FROM dsos') + +do_query(query, 'CREATE VIEW symbols_view AS ' + 'SELECT ' + 'id,' + 'name,' + '(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,' + 'dso_id,' + 'sym_start,' + 'sym_end,' + 'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding' + ' FROM symbols') + +do_query(query, 'CREATE VIEW threads_view AS ' + 'SELECT ' + 'id,' + 'machine_id,' + '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' + 'process_id,' + 'pid,' + 'tid' + ' FROM threads') + +do_query(query, 'CREATE VIEW comm_threads_view AS ' + 'SELECT ' + 'comm_id,' + '(SELECT comm FROM comms WHERE id = comm_id) AS command,' + 'thread_id,' + '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' + '(SELECT tid FROM threads WHERE id = thread_id) AS tid' + ' FROM comm_threads') + +if perf_db_export_calls or perf_db_export_callchains: + do_query(query, 'CREATE VIEW call_paths_view AS ' + 'SELECT ' + 'c.id,' + 'to_hex(c.ip) AS ip,' + 'c.symbol_id,' + '(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,' + '(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,' + '(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,' + 'c.parent_id,' + 'to_hex(p.ip) AS parent_ip,' + 'p.symbol_id AS parent_symbol_id,' + '(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,' + '(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,' + '(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name' + ' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id') +if perf_db_export_calls: + do_query(query, 'CREATE VIEW calls_view AS ' + 'SELECT ' + 'calls.id,' + 'thread_id,' + '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' + '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' + '(SELECT comm FROM comms WHERE id = comm_id) AS command,' + 'call_path_id,' + 'to_hex(ip) AS ip,' + 'symbol_id,' + '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' + 'call_time,' + 'return_time,' + 'return_time - call_time AS elapsed_time,' + 'branch_count,' + 'insn_count,' + 'cyc_count,' + 'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC,' + 'call_id,' + 'return_id,' + 'CASE 
WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,' + 'parent_call_path_id,' + 'calls.parent_id' + ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') + +do_query(query, 'CREATE VIEW samples_view AS ' + 'SELECT ' + 'id,' + 'time,' + 'cpu,' + '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' + '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' + '(SELECT comm FROM comms WHERE id = comm_id) AS command,' + '(SELECT name FROM selected_events WHERE id = evsel_id) AS event,' + 'to_hex(ip) AS ip_hex,' + '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' + 'sym_offset,' + '(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,' + 'to_hex(to_ip) AS to_ip_hex,' + '(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,' + 'to_sym_offset,' + '(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,' + '(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,' + 'in_tx,' + 'insn_count,' + 'cyc_count,' + 'CASE WHEN cyc_count=0 THEN CAST(0 AS NUMERIC(20, 2)) ELSE CAST((CAST(insn_count AS FLOAT) / cyc_count) AS NUMERIC(20, 2)) END AS IPC' + ' FROM samples') + +do_query(query, 'CREATE VIEW ptwrite_view AS ' + 'SELECT ' + 'ptwrite.id,' + 'time,' + 'cpu,' + 'to_hex(payload) AS payload_hex,' + 'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip' + ' FROM ptwrite' + ' INNER JOIN samples ON samples.id = ptwrite.id') + +do_query(query, 'CREATE VIEW cbr_view AS ' + 'SELECT ' + 'cbr.id,' + 'time,' + 'cpu,' + 'cbr,' + 'mhz,' + 'percent' + ' FROM cbr' + ' INNER JOIN samples ON samples.id = cbr.id') + +do_query(query, 'CREATE VIEW mwait_view AS ' + 'SELECT ' + 'mwait.id,' + 'time,' + 'cpu,' + 'to_hex(hints) AS hints_hex,' + 'to_hex(extensions) AS extensions_hex' + ' FROM mwait' + ' INNER JOIN samples ON samples.id = mwait.id') + +do_query(query, 'CREATE VIEW pwre_view AS ' + 'SELECT ' + 'pwre.id,' + 'time,' + 'cpu,' + 'cstate,' + 'subcstate,' + 'CASE WHEN hw=FALSE THEN \'False\' ELSE \'True\' END AS hw' + ' FROM pwre' + ' INNER JOIN samples ON samples.id = pwre.id') + +do_query(query, 'CREATE VIEW exstop_view AS ' + 'SELECT ' + 'exstop.id,' + 'time,' + 'cpu,' + 'CASE WHEN exact_ip=FALSE THEN \'False\' ELSE \'True\' END AS exact_ip' + ' FROM exstop' + ' INNER JOIN samples ON samples.id = exstop.id') + +do_query(query, 'CREATE VIEW pwrx_view AS ' + 'SELECT ' + 'pwrx.id,' + 'time,' + 'cpu,' + 'deepest_cstate,' + 'last_cstate,' + 'CASE WHEN wake_reason=1 THEN \'Interrupt\'' + ' WHEN wake_reason=2 THEN \'Timer Deadline\'' + ' WHEN wake_reason=4 THEN \'Monitored Address\'' + ' WHEN wake_reason=8 THEN \'HW\'' + ' ELSE CAST ( wake_reason AS VARCHAR(2) )' + 'END AS wake_reason' + ' FROM pwrx' + ' INNER JOIN samples ON samples.id = pwrx.id') + +do_query(query, 'CREATE VIEW power_events_view AS ' + 'SELECT ' + 'samples.id,' + 'samples.time,' + 'samples.cpu,' + 'selected_events.name AS event,' + 'FORMAT(\'%6s\', cbr.cbr) AS cbr,' + 'FORMAT(\'%6s\', cbr.mhz) AS MHz,' + 'FORMAT(\'%5s\', cbr.percent) AS percent,' + 'to_hex(mwait.hints) AS hints_hex,' + 'to_hex(mwait.extensions) AS extensions_hex,' + 'FORMAT(\'%3s\', pwre.cstate) AS cstate,' + 'FORMAT(\'%3s\', pwre.subcstate) AS subcstate,' + 'CASE WHEN pwre.hw=FALSE THEN \'False\' WHEN pwre.hw=TRUE THEN \'True\' ELSE NULL END AS hw,' + 'CASE WHEN exstop.exact_ip=FALSE THEN \'False\' WHEN exstop.exact_ip=TRUE THEN \'True\' 
ELSE NULL END AS exact_ip,' + 'FORMAT(\'%3s\', pwrx.deepest_cstate) AS deepest_cstate,' + 'FORMAT(\'%3s\', pwrx.last_cstate) AS last_cstate,' + 'CASE WHEN pwrx.wake_reason=1 THEN \'Interrupt\'' + ' WHEN pwrx.wake_reason=2 THEN \'Timer Deadline\'' + ' WHEN pwrx.wake_reason=4 THEN \'Monitored Address\'' + ' WHEN pwrx.wake_reason=8 THEN \'HW\'' + ' ELSE FORMAT(\'%2s\', pwrx.wake_reason)' + 'END AS wake_reason' + ' FROM cbr' + ' FULL JOIN mwait ON mwait.id = cbr.id' + ' FULL JOIN pwre ON pwre.id = cbr.id' + ' FULL JOIN exstop ON exstop.id = cbr.id' + ' FULL JOIN pwrx ON pwrx.id = cbr.id' + ' INNER JOIN samples ON samples.id = coalesce(cbr.id, mwait.id, pwre.id, exstop.id, pwrx.id)' + ' INNER JOIN selected_events ON selected_events.id = samples.evsel_id' + ' ORDER BY samples.id') + +do_query(query, 'CREATE VIEW context_switches_view AS ' + 'SELECT ' + 'context_switches.id,' + 'context_switches.machine_id,' + 'context_switches.time,' + 'context_switches.cpu,' + 'th_out.pid AS pid_out,' + 'th_out.tid AS tid_out,' + 'comm_out.comm AS comm_out,' + 'th_in.pid AS pid_in,' + 'th_in.tid AS tid_in,' + 'comm_in.comm AS comm_in,' + 'CASE WHEN context_switches.flags = 0 THEN \'in\'' + ' WHEN context_switches.flags = 1 THEN \'out\'' + ' WHEN context_switches.flags = 3 THEN \'out preempt\'' + ' ELSE CAST ( context_switches.flags AS VARCHAR(11) )' + 'END AS flags' + ' FROM context_switches' + ' INNER JOIN threads AS th_out ON th_out.id = context_switches.thread_out_id' + ' INNER JOIN threads AS th_in ON th_in.id = context_switches.thread_in_id' + ' INNER JOIN comms AS comm_out ON comm_out.id = context_switches.comm_out_id' + ' INNER JOIN comms AS comm_in ON comm_in.id = context_switches.comm_in_id') + +file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0) +file_trailer = b"\377\377" + +def open_output_file(file_name): + path_name = output_dir_name + "/" + file_name + file = open(path_name, "wb+") + file.write(file_header) + return file + +def close_output_file(file): + file.write(file_trailer) + file.close() + +def copy_output_file_direct(file, table_name): + close_output_file(file) + sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')" + do_query(query, sql) + +# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly +def copy_output_file(file, table_name): + conn = PQconnectdb(toclientstr("dbname = " + dbname)) + if (PQstatus(conn)): + raise Exception("COPY FROM STDIN PQconnectdb failed") + file.write(file_trailer) + file.seek(0) + sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')" + res = PQexec(conn, toclientstr(sql)) + if (PQresultStatus(res) != 4): + raise Exception("COPY FROM STDIN PQexec failed") + data = file.read(65536) + while (len(data)): + ret = PQputCopyData(conn, data, len(data)) + if (ret != 1): + raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret)) + data = file.read(65536) + ret = PQputCopyEnd(conn, None) + if (ret != 1): + raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret)) + PQfinish(conn) + +def remove_output_file(file): + name = file.name + file.close() + os.unlink(name) + +evsel_file = open_output_file("evsel_table.bin") +machine_file = open_output_file("machine_table.bin") +thread_file = open_output_file("thread_table.bin") +comm_file = open_output_file("comm_table.bin") +comm_thread_file = open_output_file("comm_thread_table.bin") +dso_file = open_output_file("dso_table.bin") +symbol_file = open_output_file("symbol_table.bin") +branch_type_file = 
open_output_file("branch_type_table.bin") +sample_file = open_output_file("sample_table.bin") +if perf_db_export_calls or perf_db_export_callchains: + call_path_file = open_output_file("call_path_table.bin") +if perf_db_export_calls: + call_file = open_output_file("call_table.bin") +ptwrite_file = open_output_file("ptwrite_table.bin") +cbr_file = open_output_file("cbr_table.bin") +mwait_file = open_output_file("mwait_table.bin") +pwre_file = open_output_file("pwre_table.bin") +exstop_file = open_output_file("exstop_table.bin") +pwrx_file = open_output_file("pwrx_table.bin") +context_switches_file = open_output_file("context_switches_table.bin") + +def trace_begin(): + printdate("Writing to intermediate files...") + # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs + evsel_table(0, "unknown") + machine_table(0, 0, "unknown") + thread_table(0, 0, 0, -1, -1) + comm_table(0, "unknown", 0, 0, 0) + dso_table(0, 0, "unknown", "unknown", "") + symbol_table(0, 0, 0, 0, 0, "unknown") + sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + if perf_db_export_calls or perf_db_export_callchains: + call_path_table(0, 0, 0, 0) + call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + +unhandled_count = 0 + +def is_table_empty(table_name): + do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1'); + if query.next(): + return False + return True + +def drop(table_name): + do_query(query, 'DROP VIEW ' + table_name + '_view'); + do_query(query, 'DROP TABLE ' + table_name); + +def trace_end(): + printdate("Copying to database...") + copy_output_file(evsel_file, "selected_events") + copy_output_file(machine_file, "machines") + copy_output_file(thread_file, "threads") + copy_output_file(comm_file, "comms") + copy_output_file(comm_thread_file, "comm_threads") + copy_output_file(dso_file, "dsos") + copy_output_file(symbol_file, "symbols") + copy_output_file(branch_type_file, "branch_types") + copy_output_file(sample_file, "samples") + if perf_db_export_calls or perf_db_export_callchains: + copy_output_file(call_path_file, "call_paths") + if perf_db_export_calls: + copy_output_file(call_file, "calls") + copy_output_file(ptwrite_file, "ptwrite") + copy_output_file(cbr_file, "cbr") + copy_output_file(mwait_file, "mwait") + copy_output_file(pwre_file, "pwre") + copy_output_file(exstop_file, "exstop") + copy_output_file(pwrx_file, "pwrx") + copy_output_file(context_switches_file, "context_switches") + + printdate("Removing intermediate files...") + remove_output_file(evsel_file) + remove_output_file(machine_file) + remove_output_file(thread_file) + remove_output_file(comm_file) + remove_output_file(comm_thread_file) + remove_output_file(dso_file) + remove_output_file(symbol_file) + remove_output_file(branch_type_file) + remove_output_file(sample_file) + if perf_db_export_calls or perf_db_export_callchains: + remove_output_file(call_path_file) + if perf_db_export_calls: + remove_output_file(call_file) + remove_output_file(ptwrite_file) + remove_output_file(cbr_file) + remove_output_file(mwait_file) + remove_output_file(pwre_file) + remove_output_file(exstop_file) + remove_output_file(pwrx_file) + remove_output_file(context_switches_file) + os.rmdir(output_dir_name) + printdate("Adding primary keys") + do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE comms 
ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)') + if perf_db_export_calls or perf_db_export_callchains: + do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)') + if perf_db_export_calls: + do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE ptwrite ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE cbr ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE mwait ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE pwre ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE exstop ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE pwrx ADD PRIMARY KEY (id)') + do_query(query, 'ALTER TABLE context_switches ADD PRIMARY KEY (id)') + + printdate("Adding foreign keys") + do_query(query, 'ALTER TABLE threads ' + 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' + 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)') + do_query(query, 'ALTER TABLE comms ' + 'ADD CONSTRAINT threadfk FOREIGN KEY (c_thread_id) REFERENCES threads (id)') + do_query(query, 'ALTER TABLE comm_threads ' + 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' + 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)') + do_query(query, 'ALTER TABLE dsos ' + 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)') + do_query(query, 'ALTER TABLE symbols ' + 'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)') + do_query(query, 'ALTER TABLE samples ' + 'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),' + 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' + 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),' + 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' + 'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),' + 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),' + 'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),' + 'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)') + if perf_db_export_calls or perf_db_export_callchains: + do_query(query, 'ALTER TABLE call_paths ' + 'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),' + 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)') + if perf_db_export_calls: + do_query(query, 'ALTER TABLE calls ' + 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),' + 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' + 'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),' + 'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),' + 'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),' + 'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)') + do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') + do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') + do_query(query, 'ALTER TABLE comms ADD has_calls boolean') + do_query(query, 'UPDATE comms SET has_calls = TRUE WHERE comms.id IN (SELECT DISTINCT comm_id FROM calls)') + do_query(query, 'ALTER TABLE 
ptwrite ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE cbr ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE mwait ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE pwre ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE exstop ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE pwrx ' + 'ADD CONSTRAINT idfk FOREIGN KEY (id) REFERENCES samples (id)') + do_query(query, 'ALTER TABLE context_switches ' + 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' + 'ADD CONSTRAINT toutfk FOREIGN KEY (thread_out_id) REFERENCES threads (id),' + 'ADD CONSTRAINT tinfk FOREIGN KEY (thread_in_id) REFERENCES threads (id),' + 'ADD CONSTRAINT coutfk FOREIGN KEY (comm_out_id) REFERENCES comms (id),' + 'ADD CONSTRAINT cinfk FOREIGN KEY (comm_in_id) REFERENCES comms (id)') + + printdate("Dropping unused tables") + if is_table_empty("ptwrite"): + drop("ptwrite") + if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"): + do_query(query, 'DROP VIEW power_events_view'); + drop("mwait") + drop("pwre") + drop("exstop") + drop("pwrx") + if is_table_empty("cbr"): + drop("cbr") + if is_table_empty("context_switches"): + drop("context_switches") + + if (unhandled_count): + printdate("Warning: ", unhandled_count, " unhandled events") + printdate("Done") + +def trace_unhandled(event_name, context, event_fields_dict): + global unhandled_count + unhandled_count += 1 + +def sched__sched_switch(*x): + pass + +def evsel_table(evsel_id, evsel_name, *x): + evsel_name = toserverstr(evsel_name) + n = len(evsel_name) + fmt = "!hiqi" + str(n) + "s" + value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name) + evsel_file.write(value) + +def machine_table(machine_id, pid, root_dir, *x): + root_dir = toserverstr(root_dir) + n = len(root_dir) + fmt = "!hiqiii" + str(n) + "s" + value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir) + machine_file.write(value) + +def thread_table(thread_id, machine_id, process_id, pid, tid, *x): + value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid) + thread_file.write(value) + +def comm_table(comm_id, comm_str, thread_id, time, exec_flag, *x): + comm_str = toserverstr(comm_str) + n = len(comm_str) + fmt = "!hiqi" + str(n) + "s" + "iqiqiB" + value = struct.pack(fmt, 5, 8, comm_id, n, comm_str, 8, thread_id, 8, time, 1, exec_flag) + comm_file.write(value) + +def comm_thread_table(comm_thread_id, comm_id, thread_id, *x): + fmt = "!hiqiqiq" + value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id) + comm_thread_file.write(value) + +def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x): + short_name = toserverstr(short_name) + long_name = toserverstr(long_name) + build_id = toserverstr(build_id) + n1 = len(short_name) + n2 = len(long_name) + n3 = len(build_id) + fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s" + value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id) + dso_file.write(value) + +def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x): + symbol_name = toserverstr(symbol_name) + n = len(symbol_name) + fmt = "!hiqiqiqiqiii" + str(n) + "s" + value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, 
sym_end, 4, binding, n, symbol_name) + symbol_file.write(value) + +def branch_type_table(branch_type, name, *x): + name = toserverstr(name) + n = len(name) + fmt = "!hiii" + str(n) + "s" + value = struct.pack(fmt, 2, 4, branch_type, n, name) + branch_type_file.write(value) + +def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, insn_cnt, cyc_cnt, *x): + if branches: + value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiqiqiq", 20, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt) + else: + value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiqiqiq", 24, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id, 8, insn_cnt, 8, cyc_cnt) + sample_file.write(value) + +def call_path_table(cp_id, parent_id, symbol_id, ip, *x): + fmt = "!hiqiqiqiq" + value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip) + call_path_file.write(value) + +def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, insn_cnt, cyc_cnt, *x): + fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiqiqiq" + value = struct.pack(fmt, 14, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id, 8, insn_cnt, 8, cyc_cnt) + call_file.write(value) + +def ptwrite(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + flags = data[0] + payload = data[1] + exact_ip = flags & 1 + value = struct.pack("!hiqiqiB", 3, 8, id, 8, payload, 1, exact_ip) + ptwrite_file.write(value) + +def cbr(id, raw_buf): + data = struct.unpack_from("<BBBBII", raw_buf) + cbr = data[0] + MHz = (data[4] + 500) / 1000 + percent = ((cbr * 1000 / data[2]) + 5) / 10 + value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent)) + cbr_file.write(value) + +def mwait(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + hints = payload & 0xff + extensions = (payload >> 32) & 0x3 + value = struct.pack("!hiqiiii", 3, 8, id, 4, hints, 4, extensions) + mwait_file.write(value) + +def pwre(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + hw = (payload >> 7) & 1 + cstate = (payload >> 12) & 0xf + subcstate = (payload >> 8) & 0xf + value = struct.pack("!hiqiiiiiB", 4, 8, id, 4, cstate, 4, subcstate, 1, hw) + pwre_file.write(value) + +def exstop(id, raw_buf): + data = struct.unpack_from("<I", raw_buf) + flags = data[0] + exact_ip = flags & 1 + value = struct.pack("!hiqiB", 2, 8, id, 1, exact_ip) + exstop_file.write(value) + +def pwrx(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + deepest_cstate = payload & 0xf + last_cstate = (payload >> 4) & 0xf + wake_reason = (payload >> 8) & 0xf + value = struct.pack("!hiqiiiiii", 4, 8, id, 4, deepest_cstate, 4, last_cstate, 4, wake_reason) + pwrx_file.write(value) + +def synth_data(id, config, raw_buf, *x): + if config == 0: + ptwrite(id, raw_buf) + elif config == 1: + mwait(id, raw_buf) + elif config == 2: + pwre(id, raw_buf) + elif config == 3: + exstop(id, raw_buf) + elif config == 4: + pwrx(id, raw_buf) + elif config == 5: + cbr(id, raw_buf) + +def context_switch_table(id, machine_id, time, cpu, thread_out_id, comm_out_id, thread_in_id, comm_in_id, flags, *x): + fmt = "!hiqiqiqiiiqiqiqiqii" + value = struct.pack(fmt, 9, 8, id, 8, machine_id, 8, time, 4, cpu, 8, thread_out_id, 8, comm_out_id, 8, thread_in_id, 8, comm_in_id, 4, flags) + context_switches_file.write(value) diff --git a/data/linux-small/tools/perf/scripts/python/export-to-sqlite.py b/data/linux-small/tools/perf/scripts/python/export-to-sqlite.py new file mode 100644 index 0000000..8043a72 --- /dev/null +++ b/data/linux-small/tools/perf/scripts/python/export-to-sqlite.py @@ -0,0 +1,796 @@ +# export-to-sqlite.py: export perf data
to a sqlite3 database +# Copyright (c) 2017, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. + +from __future__ import print_function + +import os +import sys +import struct +import datetime + +# To use this script you will need to have installed package python-pyside which +# provides LGPL-licensed Python bindings for Qt. You will also need the package +# libqt4-sql-sqlite for Qt sqlite3 support. +# +# Examples of installing pyside: +# +# ubuntu: +# +# $ sudo apt-get install python-pyside.qtsql libqt4-sql-psql +# +# Alternately, to use Python3 and/or pyside 2, one of the following: +# +# $ sudo apt-get install python3-pyside.qtsql libqt4-sql-psql +# $ sudo apt-get install python-pyside2.qtsql libqt5sql5-psql +# $ sudo apt-get install python3-pyside2.qtsql libqt5sql5-psql +# fedora: +# +# $ sudo yum install python-pyside +# +# Alternately, to use Python3 and/or pyside 2, one of the following: +# $ sudo yum install python3-pyside +# $ pip install --user PySide2 +# $ pip3 install --user PySide2 +# +# An example of using this script with Intel PT: +# +# $ perf record -e intel_pt//u ls +# $ perf script -s ~/libexec/perf-core/scripts/python/export-to-sqlite.py pt_example branches calls +# 2017-07-31 14:26:07.326913 Creating database... +# 2017-07-31 14:26:07.538097 Writing records... +# 2017-07-31 14:26:09.889292 Adding indexes +# 2017-07-31 14:26:09.958746 Done +# +# To browse the database, sqlite3 can be used e.g. +# +# $ sqlite3 pt_example +# sqlite> .header on +# sqlite> select * from samples_view where id < 10; +# sqlite> .mode column +# sqlite> select * from samples_view where id < 10; +# sqlite> .tables +# sqlite> .schema samples_view +# sqlite> .quit +# +# An example of using the database is provided by the script +# exported-sql-viewer.py. Refer to that script for details. +# +# The database structure is practically the same as created by the script +# export-to-postgresql.py. Refer to that script for details. A notable +# difference is the 'transaction' column of the 'samples' table which is +# renamed 'transaction_' in sqlite because 'transaction' is a reserved word. 
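+# For example (illustrative, for a non-branches export), a query over +# transactions therefore uses the renamed column: +# +# sqlite> select id, time, transaction_ from samples where transaction_ != 0;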
+ +pyside_version_1 = True +if not "pyside-version-1" in sys.argv: + try: + from PySide2.QtSql import * + pyside_version_1 = False + except: + pass + +if pyside_version_1: + from PySide.QtSql import * + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +# These perf imports are not used at present +#from perf_trace_context import * +#from Core import * + +perf_db_export_mode = True +perf_db_export_calls = False +perf_db_export_callchains = False + +def printerr(*args, **keyword_args): + print(*args, file=sys.stderr, **keyword_args) + +def printdate(*args, **kw_args): + print(datetime.datetime.today(), *args, sep=' ', **kw_args) + +def usage(): + printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]"); + printerr("where: columns 'all' or 'branches'"); + printerr(" calls 'calls' => create calls and call_paths table"); + printerr(" callchains 'callchains' => create call_paths table"); + printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1"); + raise Exception("Too few or bad arguments") + +if (len(sys.argv) < 2): + usage() + +dbname = sys.argv[1] + +if (len(sys.argv) >= 3): + columns = sys.argv[2] +else: + columns = "all" + +if columns not in ("all", "branches"): + usage() + +branches = (columns == "branches") + +for i in range(3,len(sys.argv)): + if (sys.argv[i] == "calls"): + perf_db_export_calls = True + elif (sys.argv[i] == "callchains"): + perf_db_export_callchains = True + elif (sys.argv[i] == "pyside-version-1"): + pass + else: + usage() + +def do_query(q, s): + if (q.exec_(s)): + return + raise Exception("Query failed: " + q.lastError().text()) + +def do_query_(q): + if (q.exec_()): + return + raise Exception("Query failed: " + q.lastError().text()) + +printdate("Creating database ...") + +db_exists = False +try: + f = open(dbname) + f.close() + db_exists = True +except: + pass + +if db_exists: + raise Exception(dbname + " already exists") + +db = QSqlDatabase.addDatabase('QSQLITE') +db.setDatabaseName(dbname) +db.open() + +query = QSqlQuery(db) + +do_query(query, 'PRAGMA journal_mode = OFF') +do_query(query, 'BEGIN TRANSACTION') + +do_query(query, 'CREATE TABLE selected_events (' + 'id integer NOT NULL PRIMARY KEY,' + 'name varchar(80))') +do_query(query, 'CREATE TABLE machines (' + 'id integer NOT NULL PRIMARY KEY,' + 'pid integer,' + 'root_dir varchar(4096))') +do_query(query, 'CREATE TABLE threads (' + 'id integer NOT NULL PRIMARY KEY,' + 'machine_id bigint,' + 'process_id bigint,' + 'pid integer,' + 'tid integer)') +do_query(query, 'CREATE TABLE comms (' + 'id integer NOT NULL PRIMARY KEY,' + 'comm varchar(16),' + 'c_thread_id bigint,' + 'c_time bigint,' + 'exec_flag boolean)') +do_query(query, 'CREATE TABLE comm_threads (' + 'id integer NOT NULL PRIMARY KEY,' + 'comm_id bigint,' + 'thread_id bigint)') +do_query(query, 'CREATE TABLE dsos (' + 'id integer NOT NULL PRIMARY KEY,' + 'machine_id bigint,' + 'short_name varchar(256),' + 'long_name varchar(4096),' + 'build_id varchar(64))') +do_query(query, 'CREATE TABLE symbols (' + 'id integer NOT NULL PRIMARY KEY,' + 'dso_id bigint,' + 'sym_start bigint,' + 'sym_end bigint,' + 'binding integer,' + 'name varchar(2048))') +do_query(query, 'CREATE TABLE branch_types (' + 'id integer NOT NULL PRIMARY KEY,' + 'name varchar(80))') + +if branches: + do_query(query, 'CREATE TABLE samples (' + 'id integer NOT NULL PRIMARY KEY,' + 'evsel_id bigint,' + 'machine_id bigint,' + 'thread_id bigint,' + 'comm_id bigint,' + 'dso_id bigint,' + 'symbol_id bigint,' + 'sym_offset bigint,'
+ 'ip bigint,' + 'time bigint,' + 'cpu integer,' + 'to_dso_id bigint,' + 'to_symbol_id bigint,' + 'to_sym_offset bigint,' + 'to_ip bigint,' + 'branch_type integer,' + 'in_tx boolean,' + 'call_path_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') +else: + do_query(query, 'CREATE TABLE samples (' + 'id integer NOT NULL PRIMARY KEY,' + 'evsel_id bigint,' + 'machine_id bigint,' + 'thread_id bigint,' + 'comm_id bigint,' + 'dso_id bigint,' + 'symbol_id bigint,' + 'sym_offset bigint,' + 'ip bigint,' + 'time bigint,' + 'cpu integer,' + 'to_dso_id bigint,' + 'to_symbol_id bigint,' + 'to_sym_offset bigint,' + 'to_ip bigint,' + 'period bigint,' + 'weight bigint,' + 'transaction_ bigint,' + 'data_src bigint,' + 'branch_type integer,' + 'in_tx boolean,' + 'call_path_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') + +if perf_db_export_calls or perf_db_export_callchains: + do_query(query, 'CREATE TABLE call_paths (' + 'id integer NOT NULL PRIMARY KEY,' + 'parent_id bigint,' + 'symbol_id bigint,' + 'ip bigint)') +if perf_db_export_calls: + do_query(query, 'CREATE TABLE calls (' + 'id integer NOT NULL PRIMARY KEY,' + 'thread_id bigint,' + 'comm_id bigint,' + 'call_path_id bigint,' + 'call_time bigint,' + 'return_time bigint,' + 'branch_count bigint,' + 'call_id bigint,' + 'return_id bigint,' + 'parent_call_path_id bigint,' + 'flags integer,' + 'parent_id bigint,' + 'insn_count bigint,' + 'cyc_count bigint)') + +do_query(query, 'CREATE TABLE ptwrite (' + 'id integer NOT NULL PRIMARY KEY,' + 'payload bigint,' + 'exact_ip integer)') + +do_query(query, 'CREATE TABLE cbr (' + 'id integer NOT NULL PRIMARY KEY,' + 'cbr integer,' + 'mhz integer,' + 'percent integer)') + +do_query(query, 'CREATE TABLE mwait (' + 'id integer NOT NULL PRIMARY KEY,' + 'hints integer,' + 'extensions integer)') + +do_query(query, 'CREATE TABLE pwre (' + 'id integer NOT NULL PRIMARY KEY,' + 'cstate integer,' + 'subcstate integer,' + 'hw integer)') + +do_query(query, 'CREATE TABLE exstop (' + 'id integer NOT NULL PRIMARY KEY,' + 'exact_ip integer)') + +do_query(query, 'CREATE TABLE pwrx (' + 'id integer NOT NULL PRIMARY KEY,' + 'deepest_cstate integer,' + 'last_cstate integer,' + 'wake_reason integer)') + +do_query(query, 'CREATE TABLE context_switches (' + 'id integer NOT NULL PRIMARY KEY,' + 'machine_id bigint,' + 'time bigint,' + 'cpu integer,' + 'thread_out_id bigint,' + 'comm_out_id bigint,' + 'thread_in_id bigint,' + 'comm_in_id bigint,' + 'flags integer)') + +# printf was added to sqlite in version 3.8.3 +sqlite_has_printf = False +try: + do_query(query, 'SELECT printf("") FROM machines') + sqlite_has_printf = True +except: + pass + +def emit_to_hex(x): + if sqlite_has_printf: + return 'printf("%x", ' + x + ')' + else: + return x + +do_query(query, 'CREATE VIEW machines_view AS ' + 'SELECT ' + 'id,' + 'pid,' + 'root_dir,' + 'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest' + ' FROM machines') + +do_query(query, 'CREATE VIEW dsos_view AS ' + 'SELECT ' + 'id,' + 'machine_id,' + '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' + 'short_name,' + 'long_name,' + 'build_id' + ' FROM dsos') + +do_query(query, 'CREATE VIEW symbols_view AS ' + 'SELECT ' + 'id,' + 'name,' + '(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,' + 'dso_id,' + 'sym_start,' + 'sym_end,' + 'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding' + ' FROM symbols') + +do_query(query, 'CREATE VIEW threads_view AS ' + 'SELECT ' 
+ 'id,' + 'machine_id,' + '(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,' + 'process_id,' + 'pid,' + 'tid' + ' FROM threads') + +do_query(query, 'CREATE VIEW comm_threads_view AS ' + 'SELECT ' + 'comm_id,' + '(SELECT comm FROM comms WHERE id = comm_id) AS command,' + 'thread_id,' + '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' + '(SELECT tid FROM threads WHERE id = thread_id) AS tid' + ' FROM comm_threads') + +if perf_db_export_calls or perf_db_export_callchains: + do_query(query, 'CREATE VIEW call_paths_view AS ' + 'SELECT ' + 'c.id,' + + emit_to_hex('c.ip') + ' AS ip,' + 'c.symbol_id,' + '(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,' + '(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,' + '(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,' + 'c.parent_id,' + + emit_to_hex('p.ip') + ' AS parent_ip,' + 'p.symbol_id AS parent_symbol_id,' + '(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,' + '(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,' + '(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name' + ' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id') +if perf_db_export_calls: + do_query(query, 'CREATE VIEW calls_view AS ' + 'SELECT ' + 'calls.id,' + 'thread_id,' + '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' + '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' + '(SELECT comm FROM comms WHERE id = comm_id) AS command,' + 'call_path_id,' + + emit_to_hex('ip') + ' AS ip,' + 'symbol_id,' + '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' + 'call_time,' + 'return_time,' + 'return_time - call_time AS elapsed_time,' + 'branch_count,' + 'insn_count,' + 'cyc_count,' + 'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC,' + 'call_id,' + 'return_id,' + 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,' + 'parent_call_path_id,' + 'calls.parent_id' + ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') + +do_query(query, 'CREATE VIEW samples_view AS ' + 'SELECT ' + 'id,' + 'time,' + 'cpu,' + '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' + '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' + '(SELECT comm FROM comms WHERE id = comm_id) AS command,' + '(SELECT name FROM selected_events WHERE id = evsel_id) AS event,' + + emit_to_hex('ip') + ' AS ip_hex,' + '(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,' + 'sym_offset,' + '(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,' + + emit_to_hex('to_ip') + ' AS to_ip_hex,' + '(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,' + 'to_sym_offset,' + '(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,' + '(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,' + 'in_tx,' + 'insn_count,' + 'cyc_count,' + 'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC' + ' FROM samples') + +do_query(query, 'CREATE VIEW ptwrite_view AS ' + 'SELECT ' + 'ptwrite.id,' + 'time,' + 'cpu,' + + emit_to_hex('payload') + ' AS payload_hex,' + 'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip' + ' FROM ptwrite' + ' INNER JOIN samples ON samples.id = ptwrite.id') + +do_query(query, 'CREATE VIEW cbr_view AS ' + 
'SELECT ' + 'cbr.id,' + 'time,' + 'cpu,' + 'cbr,' + 'mhz,' + 'percent' + ' FROM cbr' + ' INNER JOIN samples ON samples.id = cbr.id') + +do_query(query, 'CREATE VIEW mwait_view AS ' + 'SELECT ' + 'mwait.id,' + 'time,' + 'cpu,' + + emit_to_hex('hints') + ' AS hints_hex,' + + emit_to_hex('extensions') + ' AS extensions_hex' + ' FROM mwait' + ' INNER JOIN samples ON samples.id = mwait.id') + +do_query(query, 'CREATE VIEW pwre_view AS ' + 'SELECT ' + 'pwre.id,' + 'time,' + 'cpu,' + 'cstate,' + 'subcstate,' + 'CASE WHEN hw=0 THEN \'False\' ELSE \'True\' END AS hw' + ' FROM pwre' + ' INNER JOIN samples ON samples.id = pwre.id') + +do_query(query, 'CREATE VIEW exstop_view AS ' + 'SELECT ' + 'exstop.id,' + 'time,' + 'cpu,' + 'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip' + ' FROM exstop' + ' INNER JOIN samples ON samples.id = exstop.id') + +do_query(query, 'CREATE VIEW pwrx_view AS ' + 'SELECT ' + 'pwrx.id,' + 'time,' + 'cpu,' + 'deepest_cstate,' + 'last_cstate,' + 'CASE WHEN wake_reason=1 THEN \'Interrupt\'' + ' WHEN wake_reason=2 THEN \'Timer Deadline\'' + ' WHEN wake_reason=4 THEN \'Monitored Address\'' + ' WHEN wake_reason=8 THEN \'HW\'' + ' ELSE wake_reason ' + 'END AS wake_reason' + ' FROM pwrx' + ' INNER JOIN samples ON samples.id = pwrx.id') + +do_query(query, 'CREATE VIEW power_events_view AS ' + 'SELECT ' + 'samples.id,' + 'time,' + 'cpu,' + 'selected_events.name AS event,' + 'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT cbr FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS cbr,' + 'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT mhz FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS mhz,' + 'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT percent FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS percent,' + 'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('hints') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS hints_hex,' + 'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('extensions') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS extensions_hex,' + 'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT cstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS cstate,' + 'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT subcstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS subcstate,' + 'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT hw FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS hw,' + 'CASE WHEN selected_events.name=\'exstop\' THEN (SELECT exact_ip FROM exstop WHERE exstop.id = samples.id) ELSE "" END AS exact_ip,' + 'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT deepest_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS deepest_cstate,' + 'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT last_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS last_cstate,' + 'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT ' + 'CASE WHEN wake_reason=1 THEN \'Interrupt\'' + ' WHEN wake_reason=2 THEN \'Timer Deadline\'' + ' WHEN wake_reason=4 THEN \'Monitored Address\'' + ' WHEN wake_reason=8 THEN \'HW\'' + ' ELSE wake_reason ' + 'END' + ' FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS wake_reason' + ' FROM samples' + ' INNER JOIN selected_events ON selected_events.id = evsel_id' + ' WHERE selected_events.name IN (\'cbr\',\'mwait\',\'exstop\',\'pwre\',\'pwrx\')') + +do_query(query, 'CREATE VIEW context_switches_view AS ' + 'SELECT ' + 'context_switches.id,' + 'context_switches.machine_id,' + 'context_switches.time,' + 
'context_switches.cpu,' + 'th_out.pid AS pid_out,' + 'th_out.tid AS tid_out,' + 'comm_out.comm AS comm_out,' + 'th_in.pid AS pid_in,' + 'th_in.tid AS tid_in,' + 'comm_in.comm AS comm_in,' + 'CASE WHEN context_switches.flags = 0 THEN \'in\'' + ' WHEN context_switches.flags = 1 THEN \'out\'' + ' WHEN context_switches.flags = 3 THEN \'out preempt\'' + ' ELSE context_switches.flags ' + 'END AS flags' + ' FROM context_switches' + ' INNER JOIN threads AS th_out ON th_out.id = context_switches.thread_out_id' + ' INNER JOIN threads AS th_in ON th_in.id = context_switches.thread_in_id' + ' INNER JOIN comms AS comm_out ON comm_out.id = context_switches.comm_out_id' + ' INNER JOIN comms AS comm_in ON comm_in.id = context_switches.comm_in_id') + +do_query(query, 'END TRANSACTION') + +evsel_query = QSqlQuery(db) +evsel_query.prepare("INSERT INTO selected_events VALUES (?, ?)") +machine_query = QSqlQuery(db) +machine_query.prepare("INSERT INTO machines VALUES (?, ?, ?)") +thread_query = QSqlQuery(db) +thread_query.prepare("INSERT INTO threads VALUES (?, ?, ?, ?, ?)") +comm_query = QSqlQuery(db) +comm_query.prepare("INSERT INTO comms VALUES (?, ?, ?, ?, ?)") +comm_thread_query = QSqlQuery(db) +comm_thread_query.prepare("INSERT INTO comm_threads VALUES (?, ?, ?)") +dso_query = QSqlQuery(db) +dso_query.prepare("INSERT INTO dsos VALUES (?, ?, ?, ?, ?)") +symbol_query = QSqlQuery(db) +symbol_query.prepare("INSERT INTO symbols VALUES (?, ?, ?, ?, ?, ?)") +branch_type_query = QSqlQuery(db) +branch_type_query.prepare("INSERT INTO branch_types VALUES (?, ?)") +sample_query = QSqlQuery(db) +if branches: + sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") +else: + sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") +if perf_db_export_calls or perf_db_export_callchains: + call_path_query = QSqlQuery(db) + call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)") +if perf_db_export_calls: + call_query = QSqlQuery(db) + call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") +ptwrite_query = QSqlQuery(db) +ptwrite_query.prepare("INSERT INTO ptwrite VALUES (?, ?, ?)") +cbr_query = QSqlQuery(db) +cbr_query.prepare("INSERT INTO cbr VALUES (?, ?, ?, ?)") +mwait_query = QSqlQuery(db) +mwait_query.prepare("INSERT INTO mwait VALUES (?, ?, ?)") +pwre_query = QSqlQuery(db) +pwre_query.prepare("INSERT INTO pwre VALUES (?, ?, ?, ?)") +exstop_query = QSqlQuery(db) +exstop_query.prepare("INSERT INTO exstop VALUES (?, ?)") +pwrx_query = QSqlQuery(db) +pwrx_query.prepare("INSERT INTO pwrx VALUES (?, ?, ?, ?)") +context_switch_query = QSqlQuery(db) +context_switch_query.prepare("INSERT INTO context_switches VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)") +
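The block above prepares each INSERT statement once and then re-binds fresh values per record inside a single transaction, which is what keeps the bulk load fast. A minimal standalone sketch of the same prepare-once, bind-many idea using Python's built-in sqlite3 module (a cut-down two-column version of the comms table, for illustration only, not the export schema):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE comms (id integer NOT NULL PRIMARY KEY, comm varchar(16))")
    # executemany() reuses one prepared statement, like the QSqlQuery prepare()/addBindValue() pairs above
    conn.executemany("INSERT INTO comms VALUES (?, ?)", [(0, "unknown"), (1, "ls")])
    conn.commit()
    print(conn.execute("SELECT id, comm FROM comms").fetchall())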
+def trace_begin(): + printdate("Writing records...") + do_query(query, 'BEGIN TRANSACTION') + # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs + evsel_table(0, "unknown") + machine_table(0, 0, "unknown") + thread_table(0, 0, 0, -1, -1) + comm_table(0, "unknown", 0, 0, 0) + dso_table(0, 0, "unknown", "unknown", "") + symbol_table(0, 0, 0, 0, 0, "unknown") + sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + if perf_db_export_calls or perf_db_export_callchains: + call_path_table(0, 0, 0, 0) + call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + +unhandled_count = 0 + +def is_table_empty(table_name): + do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1'); + if query.next(): + return False + return True + +def drop(table_name): + do_query(query, 'DROP VIEW ' + table_name + '_view'); + do_query(query, 'DROP TABLE ' + table_name); + +def trace_end(): + do_query(query, 'END TRANSACTION') + + printdate("Adding indexes") + if perf_db_export_calls: + do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') + do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') + do_query(query, 'ALTER TABLE comms ADD has_calls boolean') + do_query(query, 'UPDATE comms SET has_calls = 1 WHERE comms.id IN (SELECT DISTINCT comm_id FROM calls)') + + printdate("Dropping unused tables") + if is_table_empty("ptwrite"): + drop("ptwrite") + if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"): + do_query(query, 'DROP VIEW power_events_view'); + drop("mwait") + drop("pwre") + drop("exstop") + drop("pwrx") + if is_table_empty("cbr"): + drop("cbr") + if is_table_empty("context_switches"): + drop("context_switches") + + if (unhandled_count): + printdate("Warning: ", unhandled_count, " unhandled events") + printdate("Done") + +def trace_unhandled(event_name, context, event_fields_dict): + global unhandled_count + unhandled_count += 1 + +def sched__sched_switch(*x): + pass + +def bind_exec(q, n, x): + for xx in x[0:n]: + q.addBindValue(str(xx)) + do_query_(q) + +def evsel_table(*x): + bind_exec(evsel_query, 2, x) + +def machine_table(*x): + bind_exec(machine_query, 3, x) + +def thread_table(*x): + bind_exec(thread_query, 5, x) + +def comm_table(*x): + bind_exec(comm_query, 5, x) + +def comm_thread_table(*x): + bind_exec(comm_thread_query, 3, x) + +def dso_table(*x): + bind_exec(dso_query, 5, x) + +def symbol_table(*x): + bind_exec(symbol_query, 6, x) + +def branch_type_table(*x): + bind_exec(branch_type_query, 2, x) + +def sample_table(*x): + if branches: + for xx in x[0:15]: + sample_query.addBindValue(str(xx)) + for xx in x[19:24]: + sample_query.addBindValue(str(xx)) + do_query_(sample_query) + else: + bind_exec(sample_query, 24, x) + +def call_path_table(*x): + bind_exec(call_path_query, 4, x) + +def call_return_table(*x): + bind_exec(call_query, 14, x) + +def ptwrite(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + flags = data[0] + payload = data[1] + exact_ip = flags & 1 + ptwrite_query.addBindValue(str(id)) + ptwrite_query.addBindValue(str(payload)) + ptwrite_query.addBindValue(str(exact_ip)) + do_query_(ptwrite_query) + +def cbr(id, raw_buf): + data = struct.unpack_from("<BBBBII", raw_buf) + cbr = data[0] + MHz = (data[4] + 500) / 1000 + percent = ((cbr * 1000 / data[2]) + 5) / 10 + cbr_query.addBindValue(str(id)) + cbr_query.addBindValue(str(cbr)) + cbr_query.addBindValue(str(MHz)) + cbr_query.addBindValue(str(percent)) + do_query_(cbr_query) + +def mwait(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + hints = payload & 0xff + extensions = (payload >> 32) & 0x3 + mwait_query.addBindValue(str(id)) + mwait_query.addBindValue(str(hints)) + mwait_query.addBindValue(str(extensions)) + do_query_(mwait_query) + +def pwre(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + hw = (payload >> 7) & 1 + cstate = (payload >> 12) & 0xf + subcstate = (payload >> 8) & 0xf + pwre_query.addBindValue(str(id)) + pwre_query.addBindValue(str(cstate)) + pwre_query.addBindValue(str(subcstate)) + pwre_query.addBindValue(str(hw)) + do_query_(pwre_query) + +def exstop(id, raw_buf): + data = struct.unpack_from("<I", raw_buf) + flags = data[0] + exact_ip = flags & 1 + exstop_query.addBindValue(str(id)) + exstop_query.addBindValue(str(exact_ip)) + do_query_(exstop_query) + +def pwrx(id, raw_buf): + data = struct.unpack_from("<IQ", raw_buf) + payload = data[1] + deepest_cstate = payload & 0xf + last_cstate = (payload >> 4) & 0xf + wake_reason = (payload >> 8) & 0xf + pwrx_query.addBindValue(str(id)) + pwrx_query.addBindValue(str(deepest_cstate)) + pwrx_query.addBindValue(str(last_cstate)) + pwrx_query.addBindValue(str(wake_reason)) + do_query_(pwrx_query) +
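Each decoder above unpacks a little-endian flags/payload pair from the raw sample buffer and masks out bit fields. A standalone sketch of the pwrx decoding, using the field layout from the pwrx() handler above and a fabricated payload value:

    import struct

    raw_buf = struct.pack("<IQ", 0, 0x142)              # fabricated flags + payload
    data = struct.unpack_from("<IQ", raw_buf)
    payload = data[1]
    deepest_cstate = payload & 0xf                      # bits 0-3
    last_cstate = (payload >> 4) & 0xf                  # bits 4-7
    wake_reason = (payload >> 8) & 0xf                  # bits 8-11
    print(deepest_cstate, last_cstate, wake_reason)     # prints: 2 4 1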
+def synth_data(id, config, raw_buf, *x): + if config == 0: + ptwrite(id, raw_buf) + elif config == 1: + mwait(id, raw_buf) + elif config == 2: + pwre(id, raw_buf) + elif config == 3: + exstop(id, raw_buf) + elif config == 4: + pwrx(id, raw_buf) + elif config == 5: + cbr(id, raw_buf) + +def context_switch_table(*x): + bind_exec(context_switch_query, 9, x) diff --git a/data/linux-small/tools/perf/scripts/python/exported-sql-viewer.py b/data/linux-small/tools/perf/scripts/python/exported-sql-viewer.py new file mode 100755 index 0000000..13f2d8a --- /dev/null +++ b/data/linux-small/tools/perf/scripts/python/exported-sql-viewer.py @@ -0,0 +1,5027 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: GPL-2.0 +# exported-sql-viewer.py: view data from sql database +# Copyright (c) 2014-2018, Intel Corporation. + +# To use this script you will need to have exported data using either the +# export-to-sqlite.py or the export-to-postgresql.py script. Refer to those +# scripts for details. +# +# Following on from the example in the export scripts, a +# call-graph can be displayed for the pt_example database like this: +# +# python tools/perf/scripts/python/exported-sql-viewer.py pt_example +# +# Note that for PostgreSQL, this script supports connecting to remote databases +# by setting hostname, port, username, password, and dbname e.g. +# +# python tools/perf/scripts/python/exported-sql-viewer.py "hostname=myhost username=myuser password=mypassword dbname=pt_example" +# +# The result is a GUI window with a tree representing a context-sensitive +# call-graph. Expanding a couple of levels of the tree and adjusting column +# widths to suit will display something like: +# +# Call Graph: pt_example +# Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%) +# v- ls +# v- 2638:2638 +# v- _start ld-2.19.so 1 10074071 100.0 211135 100.0 +# |- unknown unknown 1 13198 0.1 1 0.0 +# >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3 +# >- _dl_init_internal ld-2.19.so 1 448152 4.4 11094 5.3 +# v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4 +# >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1 +# >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0 +# >- __libc_csu_init ls 1 10354 0.1 10 0.0 +# |- _setjmp libc-2.19.so 1 0 0.0 4 0.0 +# v- main ls 1 8182043 99.6 180254 99.9 +# +# Points to note: +# The top level is a command name (comm) +# The next level is a thread (pid:tid) +# Subsequent levels are functions +# 'Count' is the number of calls +# 'Time' is the elapsed time until the function returns +# Percentages are relative to the level above +# 'Branch Count' is the total number of branches for that function and all +# functions that it calls + +# There is also an "All branches" report, which displays branches and +# possibly disassembly. However, presently, the only supported disassembler is +# Intel XED, and additionally the object code must be present in the perf build ID +# cache. To use Intel XED, libxed.so must be present.
To build and install +# libxed.so: +# git clone https://github.com/intelxed/mbuild.git mbuild +# git clone https://github.com/intelxed/xed +# cd xed +# ./mfile.py --share +# sudo ./mfile.py --prefix=/usr/local install +# sudo ldconfig +# +# Example report: +# +# Time CPU Command PID TID Branch Type In Tx Branch +# 8107675239590 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so) +# 7fab593ea260 48 89 e7 mov %rsp, %rdi +# 8107675239899 2 ls 22011 22011 hardware interrupt No 7fab593ea260 _start (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel]) +# 8107675241900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so) +# 7fab593ea260 48 89 e7 mov %rsp, %rdi +# 7fab593ea263 e8 c8 06 00 00 callq 0x7fab593ea930 +# 8107675241900 2 ls 22011 22011 call No 7fab593ea263 _start+0x3 (ld-2.19.so) -> 7fab593ea930 _dl_start (ld-2.19.so) +# 7fab593ea930 55 pushq %rbp +# 7fab593ea931 48 89 e5 mov %rsp, %rbp +# 7fab593ea934 41 57 pushq %r15 +# 7fab593ea936 41 56 pushq %r14 +# 7fab593ea938 41 55 pushq %r13 +# 7fab593ea93a 41 54 pushq %r12 +# 7fab593ea93c 53 pushq %rbx +# 7fab593ea93d 48 89 fb mov %rdi, %rbx +# 7fab593ea940 48 83 ec 68 sub $0x68, %rsp +# 7fab593ea944 0f 31 rdtsc +# 7fab593ea946 48 c1 e2 20 shl $0x20, %rdx +# 7fab593ea94a 89 c0 mov %eax, %eax +# 7fab593ea94c 48 09 c2 or %rax, %rdx +# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax +# 8107675242232 2 ls 22011 22011 hardware interrupt No 7fab593ea94f _dl_start+0x1f (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel]) +# 8107675242900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea94f _dl_start+0x1f (ld-2.19.so) +# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax +# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip) +# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel]) + +from __future__ import print_function + +import sys +# Only change warnings if the python -W option was not used +if not sys.warnoptions: + import warnings + # PySide2 causes deprecation warnings, ignore them. 
+ warnings.filterwarnings("ignore", category=DeprecationWarning) +import argparse +import weakref +import threading +import string +try: + # Python2 + import cPickle as pickle + # size of pickled integer big enough for record size + glb_nsz = 8 +except ImportError: + import pickle + glb_nsz = 16 +import re +import os +import random +import copy +import math +from libxed import LibXED + +pyside_version_1 = True +if not "--pyside-version-1" in sys.argv: + try: + from PySide2.QtCore import * + from PySide2.QtGui import * + from PySide2.QtSql import * + from PySide2.QtWidgets import * + pyside_version_1 = False + except: + pass + +if pyside_version_1: + from PySide.QtCore import * + from PySide.QtGui import * + from PySide.QtSql import * + +from decimal import Decimal, ROUND_HALF_UP +from ctypes import CDLL, Structure, create_string_buffer, addressof, sizeof, \ + c_void_p, c_bool, c_byte, c_char, c_int, c_uint, c_longlong, c_ulonglong +from multiprocessing import Process, Array, Value, Event + +# xrange is range in Python3 +try: + xrange +except NameError: + xrange = range + +def printerr(*args, **keyword_args): + print(*args, file=sys.stderr, **keyword_args) + +# Data formatting helpers + +def tohex(ip): + if ip < 0: + ip += 1 << 64 + return "%x" % ip + +def offstr(offset): + if offset: + return "+0x%x" % offset + return "" + +def dsoname(name): + if name == "[kernel.kallsyms]": + return "[kernel]" + return name + +def findnth(s, sub, n, offs=0): + pos = s.find(sub) + if pos < 0: + return pos + if n <= 1: + return offs + pos + return findnth(s[pos + 1:], sub, n - 1, offs + pos + 1) + +# Percent to one decimal place + +def PercentToOneDP(n, d): + if not d: + return "0.0" + x = (n * Decimal(100)) / d + return str(x.quantize(Decimal(".1"), rounding=ROUND_HALF_UP)) + +# Helper for queries that must not fail + +def QueryExec(query, stmt): + ret = query.exec_(stmt) + if not ret: + raise Exception("Query failed: " + query.lastError().text()) + +# Background thread + +class Thread(QThread): + + done = Signal(object) + + def __init__(self, task, param=None, parent=None): + super(Thread, self).__init__(parent) + self.task = task + self.param = param + + def run(self): + while True: + if self.param is None: + done, result = self.task() + else: + done, result = self.task(self.param) + self.done.emit(result) + if done: + break + +# Tree data model + +class TreeModel(QAbstractItemModel): + + def __init__(self, glb, params, parent=None): + super(TreeModel, self).__init__(parent) + self.glb = glb + self.params = params + self.root = self.GetRoot() + self.last_row_read = 0 + + def Item(self, parent): + if parent.isValid(): + return parent.internalPointer() + else: + return self.root + + def rowCount(self, parent): + result = self.Item(parent).childCount() + if result < 0: + result = 0 + self.dataChanged.emit(parent, parent) + return result + + def hasChildren(self, parent): + return self.Item(parent).hasChildren() + + def headerData(self, section, orientation, role): + if role == Qt.TextAlignmentRole: + return self.columnAlignment(section) + if role != Qt.DisplayRole: + return None + if orientation != Qt.Horizontal: + return None + return self.columnHeader(section) + + def parent(self, child): + child_item = child.internalPointer() + if child_item is self.root: + return QModelIndex() + parent_item = child_item.getParentItem() + return self.createIndex(parent_item.getRow(), 0, parent_item) + + def index(self, row, column, parent): + child_item = self.Item(parent).getChildItem(row) + return 
self.createIndex(row, column, child_item) + + def DisplayData(self, item, index): + return item.getData(index.column()) + + def FetchIfNeeded(self, row): + if row > self.last_row_read: + self.last_row_read = row + if row + 10 >= self.root.child_count: + self.fetcher.Fetch(glb_chunk_sz) + + def columnAlignment(self, column): + return Qt.AlignLeft + + def columnFont(self, column): + return None + + def data(self, index, role): + if role == Qt.TextAlignmentRole: + return self.columnAlignment(index.column()) + if role == Qt.FontRole: + return self.columnFont(index.column()) + if role != Qt.DisplayRole: + return None + item = index.internalPointer() + return self.DisplayData(item, index) + +# Table data model + +class TableModel(QAbstractTableModel): + + def __init__(self, parent=None): + super(TableModel, self).__init__(parent) + self.child_count = 0 + self.child_items = [] + self.last_row_read = 0 + + def Item(self, parent): + if parent.isValid(): + return parent.internalPointer() + else: + return self + + def rowCount(self, parent): + return self.child_count + + def headerData(self, section, orientation, role): + if role == Qt.TextAlignmentRole: + return self.columnAlignment(section) + if role != Qt.DisplayRole: + return None + if orientation != Qt.Horizontal: + return None + return self.columnHeader(section) + + def index(self, row, column, parent): + return self.createIndex(row, column, self.child_items[row]) + + def DisplayData(self, item, index): + return item.getData(index.column()) + + def FetchIfNeeded(self, row): + if row > self.last_row_read: + self.last_row_read = row + if row + 10 >= self.child_count: + self.fetcher.Fetch(glb_chunk_sz) + + def columnAlignment(self, column): + return Qt.AlignLeft + + def columnFont(self, column): + return None + + def data(self, index, role): + if role == Qt.TextAlignmentRole: + return self.columnAlignment(index.column()) + if role == Qt.FontRole: + return self.columnFont(index.column()) + if role != Qt.DisplayRole: + return None + item = index.internalPointer() + return self.DisplayData(item, index) + +# Model cache + +model_cache = weakref.WeakValueDictionary() +model_cache_lock = threading.Lock() + +def LookupCreateModel(model_name, create_fn): + model_cache_lock.acquire() + try: + model = model_cache[model_name] + except: + model = None + if model is None: + model = create_fn() + model_cache[model_name] = model + model_cache_lock.release() + return model + +def LookupModel(model_name): + model_cache_lock.acquire() + try: + model = model_cache[model_name] + except: + model = None + model_cache_lock.release() + return model + +# Find bar + +class FindBar(): + + def __init__(self, parent, finder, is_reg_expr=False): + self.finder = finder + self.context = [] + self.last_value = None + self.last_pattern = None + + label = QLabel("Find:") + label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) + + self.textbox = QComboBox() + self.textbox.setEditable(True) + self.textbox.currentIndexChanged.connect(self.ValueChanged) + + self.progress = QProgressBar() + self.progress.setRange(0, 0) + self.progress.hide() + + if is_reg_expr: + self.pattern = QCheckBox("Regular Expression") + else: + self.pattern = QCheckBox("Pattern") + self.pattern.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) + + self.next_button = QToolButton() + self.next_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowDown)) + self.next_button.released.connect(lambda: self.NextPrev(1)) + + self.prev_button = QToolButton() + 
self.prev_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowUp)) + self.prev_button.released.connect(lambda: self.NextPrev(-1)) + + self.close_button = QToolButton() + self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton)) + self.close_button.released.connect(self.Deactivate) + + self.hbox = QHBoxLayout() + self.hbox.setContentsMargins(0, 0, 0, 0) + + self.hbox.addWidget(label) + self.hbox.addWidget(self.textbox) + self.hbox.addWidget(self.progress) + self.hbox.addWidget(self.pattern) + self.hbox.addWidget(self.next_button) + self.hbox.addWidget(self.prev_button) + self.hbox.addWidget(self.close_button) + + self.bar = QWidget() + self.bar.setLayout(self.hbox) + self.bar.hide() + + def Widget(self): + return self.bar + + def Activate(self): + self.bar.show() + self.textbox.lineEdit().selectAll() + self.textbox.setFocus() + + def Deactivate(self): + self.bar.hide() + + def Busy(self): + self.textbox.setEnabled(False) + self.pattern.hide() + self.next_button.hide() + self.prev_button.hide() + self.progress.show() + + def Idle(self): + self.textbox.setEnabled(True) + self.progress.hide() + self.pattern.show() + self.next_button.show() + self.prev_button.show() + + def Find(self, direction): + value = self.textbox.currentText() + pattern = self.pattern.isChecked() + self.last_value = value + self.last_pattern = pattern + self.finder.Find(value, direction, pattern, self.context) + + def ValueChanged(self): + value = self.textbox.currentText() + pattern = self.pattern.isChecked() + index = self.textbox.currentIndex() + data = self.textbox.itemData(index) + # Store the pattern in the combo box to keep it with the text value + if data == None: + self.textbox.setItemData(index, pattern) + else: + self.pattern.setChecked(data) + self.Find(0) + + def NextPrev(self, direction): + value = self.textbox.currentText() + pattern = self.pattern.isChecked() + if value != self.last_value: + index = self.textbox.findText(value) + # Allow for a button press before the value has been added to the combo box + if index < 0: + index = self.textbox.count() + self.textbox.addItem(value, pattern) + self.textbox.setCurrentIndex(index) + return + else: + self.textbox.setItemData(index, pattern) + elif pattern != self.last_pattern: + # Keep the pattern recorded in the combo box up to date + index = self.textbox.currentIndex() + self.textbox.setItemData(index, pattern) + self.Find(direction) + + def NotFound(self): + QMessageBox.information(self.bar, "Find", "'" + self.textbox.currentText() + "' not found") + +# Context-sensitive call graph data model item base + +class CallGraphLevelItemBase(object): + + def __init__(self, glb, params, row, parent_item): + self.glb = glb + self.params = params + self.row = row + self.parent_item = parent_item + self.query_done = False + self.child_count = 0 + self.child_items = [] + if parent_item: + self.level = parent_item.level + 1 + else: + self.level = 0 + + def getChildItem(self, row): + return self.child_items[row] + + def getParentItem(self): + return self.parent_item + + def getRow(self): + return self.row + + def childCount(self): + if not self.query_done: + self.Select() + if not self.child_count: + return -1 + return self.child_count + + def hasChildren(self): + if not self.query_done: + return True + return self.child_count > 0 + + def getData(self, column): + return self.data[column] + +# Context-sensitive call graph data model level 2+ item base + +class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase): + + def __init__(self, 
glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item): + super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item) + self.comm_id = comm_id + self.thread_id = thread_id + self.call_path_id = call_path_id + self.insn_cnt = insn_cnt + self.cyc_cnt = cyc_cnt + self.branch_count = branch_count + self.time = time + + def Select(self): + self.query_done = True + query = QSqlQuery(self.glb.db) + if self.params.have_ipc: + ipc_str = ", SUM(insn_count), SUM(cyc_count)" + else: + ipc_str = "" + QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time)" + ipc_str + ", SUM(branch_count)" + " FROM calls" + " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" + " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" + " INNER JOIN dsos ON symbols.dso_id = dsos.id" + " WHERE parent_call_path_id = " + str(self.call_path_id) + + " AND comm_id = " + str(self.comm_id) + + " AND thread_id = " + str(self.thread_id) + + " GROUP BY call_path_id, name, short_name" + " ORDER BY call_path_id") + while query.next(): + if self.params.have_ipc: + insn_cnt = int(query.value(5)) + cyc_cnt = int(query.value(6)) + branch_count = int(query.value(7)) + else: + insn_cnt = 0 + cyc_cnt = 0 + branch_count = int(query.value(5)) + child_item = CallGraphLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self) + self.child_items.append(child_item) + self.child_count += 1 + +# Context-sensitive call graph data model level three item + +class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase): + + def __init__(self, glb, params, row, comm_id, thread_id, call_path_id, name, dso, count, time, insn_cnt, cyc_cnt, branch_count, parent_item): + super(CallGraphLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, call_path_id, time, insn_cnt, cyc_cnt, branch_count, parent_item) + dso = dsoname(dso) + if self.params.have_ipc: + insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt) + cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt) + br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count) + ipc = CalcIPC(cyc_cnt, insn_cnt) + self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ] + else: + self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ] + self.dbid = call_path_id + +# Context-sensitive call graph data model level two item + +class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase): + + def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item): + super(CallGraphLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 1, 0, 0, 0, 0, parent_item) + if self.params.have_ipc: + self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""] + else: + self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""] + self.dbid = thread_id + + def Select(self): + super(CallGraphLevelTwoItem, self).Select() + for child_item in self.child_items: + self.time += child_item.time + self.insn_cnt += child_item.insn_cnt + self.cyc_cnt += child_item.cyc_cnt + self.branch_count += child_item.branch_count + for child_item in self.child_items: + 
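# Second pass: with the totals accumulated above, rewrite each child's percentage columns +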
child_item.data[4] = PercentToOneDP(child_item.time, self.time) + if self.params.have_ipc: + child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt) + child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt) + child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count) + else: + child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count) + +# Context-sensitive call graph data model level one item + +class CallGraphLevelOneItem(CallGraphLevelItemBase): + + def __init__(self, glb, params, row, comm_id, comm, parent_item): + super(CallGraphLevelOneItem, self).__init__(glb, params, row, parent_item) + if self.params.have_ipc: + self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""] + else: + self.data = [comm, "", "", "", "", "", ""] + self.dbid = comm_id + + def Select(self): + self.query_done = True + query = QSqlQuery(self.glb.db) + QueryExec(query, "SELECT thread_id, pid, tid" + " FROM comm_threads" + " INNER JOIN threads ON thread_id = threads.id" + " WHERE comm_id = " + str(self.dbid)) + while query.next(): + child_item = CallGraphLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self) + self.child_items.append(child_item) + self.child_count += 1 + +# Context-sensitive call graph data model root item + +class CallGraphRootItem(CallGraphLevelItemBase): + + def __init__(self, glb, params): + super(CallGraphRootItem, self).__init__(glb, params, 0, None) + self.dbid = 0 + self.query_done = True + if_has_calls = "" + if IsSelectable(glb.db, "comms", columns = "has_calls"): + if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE + query = QSqlQuery(glb.db) + QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls) + while query.next(): + if not query.value(0): + continue + child_item = CallGraphLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self) + self.child_items.append(child_item) + self.child_count += 1 + +# Call graph model parameters + +class CallGraphModelParams(): + + def __init__(self, glb, parent=None): + self.have_ipc = IsSelectable(glb.db, "calls", columns = "insn_count, cyc_count") + +# Context-sensitive call graph data model base + +class CallGraphModelBase(TreeModel): + + def __init__(self, glb, parent=None): + super(CallGraphModelBase, self).__init__(glb, CallGraphModelParams(glb), parent) + + def FindSelect(self, value, pattern, query): + if pattern: + # postgresql and sqlite pattern matching differences: + # postgresql LIKE is case sensitive but sqlite LIKE is not + # postgresql LIKE allows % and _ to be escaped with \ but sqlite LIKE does not + # postgresql supports ILIKE which is case insensitive + # sqlite supports GLOB (text only) which uses * and ? and is case sensitive + if not self.glb.dbref.is_sqlite3: + # Escape % and _ + s = value.replace("%", "\%") + s = s.replace("_", "\_") + # Translate * and ?
into SQL LIKE pattern characters % and _ + trans = string.maketrans("*?", "%_") + match = " LIKE '" + str(s).translate(trans) + "'" + else: + match = " GLOB '" + str(value) + "'" + else: + match = " = '" + str(value) + "'" + self.DoFindSelect(query, match) + + def Found(self, query, found): + if found: + return self.FindPath(query) + return [] + + def FindValue(self, value, pattern, query, last_value, last_pattern): + if last_value == value and pattern == last_pattern: + found = query.first() + else: + self.FindSelect(value, pattern, query) + found = query.next() + return self.Found(query, found) + + def FindNext(self, query): + found = query.next() + if not found: + found = query.first() + return self.Found(query, found) + + def FindPrev(self, query): + found = query.previous() + if not found: + found = query.last() + return self.Found(query, found) + + def FindThread(self, c): + if c.direction == 0 or c.value != c.last_value or c.pattern != c.last_pattern: + ids = self.FindValue(c.value, c.pattern, c.query, c.last_value, c.last_pattern) + elif c.direction > 0: + ids = self.FindNext(c.query) + else: + ids = self.FindPrev(c.query) + return (True, ids) + + def Find(self, value, direction, pattern, context, callback): + class Context(): + def __init__(self, *x): + self.value, self.direction, self.pattern, self.query, self.last_value, self.last_pattern = x + def Update(self, *x): + self.value, self.direction, self.pattern, self.last_value, self.last_pattern = x + (self.value, self.pattern) + if len(context): + context[0].Update(value, direction, pattern) + else: + context.append(Context(value, direction, pattern, QSqlQuery(self.glb.db), None, None)) + # Use a thread so the UI is not blocked during the SELECT + thread = Thread(self.FindThread, context[0]) + thread.done.connect(lambda ids, t=thread, c=callback: self.FindDone(t, c, ids), Qt.QueuedConnection) + thread.start() + + def FindDone(self, thread, callback, ids): + callback(ids) + +# Context-sensitive call graph data model + +class CallGraphModel(CallGraphModelBase): + + def __init__(self, glb, parent=None): + super(CallGraphModel, self).__init__(glb, parent) + + def GetRoot(self): + return CallGraphRootItem(self.glb, self.params) + + def columnCount(self, parent=None): + if self.params.have_ipc: + return 12 + else: + return 7 + + def columnHeader(self, column): + if self.params.have_ipc: + headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "] + else: + headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "] + return headers[column] + + def columnAlignment(self, column): + if self.params.have_ipc: + alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] + else: + alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] + return alignment[column] + + def DoFindSelect(self, query, match): + QueryExec(query, "SELECT call_path_id, comm_id, thread_id" + " FROM calls" + " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" + " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" + " WHERE calls.id <> 0" + " AND symbols.name" + match + + " GROUP BY comm_id, thread_id, call_path_id" + " ORDER BY comm_id, thread_id, call_path_id") + + def FindPath(self, query): + # Turn 
the query result into a list of ids that the tree view can walk + # to open the tree at the right place. + ids = [] + parent_id = query.value(0) + while parent_id: + ids.insert(0, parent_id) + q2 = QSqlQuery(self.glb.db) + QueryExec(q2, "SELECT parent_id" + " FROM call_paths" + " WHERE id = " + str(parent_id)) + if not q2.next(): + break + parent_id = q2.value(0) + # The call path root is not used + if ids[0] == 1: + del ids[0] + ids.insert(0, query.value(2)) + ids.insert(0, query.value(1)) + return ids + +# Call tree data model level 2+ item base + +class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase): + + def __init__(self, glb, params, row, comm_id, thread_id, calls_id, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item): + super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, params, row, parent_item) + self.comm_id = comm_id + self.thread_id = thread_id + self.calls_id = calls_id + self.call_time = call_time + self.time = time + self.insn_cnt = insn_cnt + self.cyc_cnt = cyc_cnt + self.branch_count = branch_count + + def Select(self): + self.query_done = True + if self.calls_id == 0: + comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id) + else: + comm_thread = "" + if self.params.have_ipc: + ipc_str = ", insn_count, cyc_count" + else: + ipc_str = "" + query = QSqlQuery(self.glb.db) + QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time" + ipc_str + ", branch_count" + " FROM calls" + " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" + " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" + " INNER JOIN dsos ON symbols.dso_id = dsos.id" + " WHERE calls.parent_id = " + str(self.calls_id) + comm_thread + + " ORDER BY call_time, calls.id") + while query.next(): + if self.params.have_ipc: + insn_cnt = int(query.value(5)) + cyc_cnt = int(query.value(6)) + branch_count = int(query.value(7)) + else: + insn_cnt = 0 + cyc_cnt = 0 + branch_count = int(query.value(5)) + child_item = CallTreeLevelThreeItem(self.glb, self.params, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), insn_cnt, cyc_cnt, branch_count, self) + self.child_items.append(child_item) + self.child_count += 1 + +# Call tree data model level three item + +class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase): + + def __init__(self, glb, params, row, comm_id, thread_id, calls_id, name, dso, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item): + super(CallTreeLevelThreeItem, self).__init__(glb, params, row, comm_id, thread_id, calls_id, call_time, time, insn_cnt, cyc_cnt, branch_count, parent_item) + dso = dsoname(dso) + if self.params.have_ipc: + insn_pcnt = PercentToOneDP(insn_cnt, parent_item.insn_cnt) + cyc_pcnt = PercentToOneDP(cyc_cnt, parent_item.cyc_cnt) + br_pcnt = PercentToOneDP(branch_count, parent_item.branch_count) + ipc = CalcIPC(cyc_cnt, insn_cnt) + self.data = [ name, dso, str(call_time), str(time), PercentToOneDP(time, parent_item.time), str(insn_cnt), insn_pcnt, str(cyc_cnt), cyc_pcnt, ipc, str(branch_count), br_pcnt ] + else: + self.data = [ name, dso, str(call_time), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ] + self.dbid = calls_id + +# Call tree data model level two item + +class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase): + + def __init__(self, glb, params, row, comm_id, thread_id, pid, tid, parent_item): + 
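# A level two item is a thread (pid:tid); its totals are accumulated from its children in Select() below +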
super(CallTreeLevelTwoItem, self).__init__(glb, params, row, comm_id, thread_id, 0, 0, 0, 0, 0, 0, parent_item) + if self.params.have_ipc: + self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", "", "", "", "", "", ""] + else: + self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""] + self.dbid = thread_id + + def Select(self): + super(CallTreeLevelTwoItem, self).Select() + for child_item in self.child_items: + self.time += child_item.time + self.insn_cnt += child_item.insn_cnt + self.cyc_cnt += child_item.cyc_cnt + self.branch_count += child_item.branch_count + for child_item in self.child_items: + child_item.data[4] = PercentToOneDP(child_item.time, self.time) + if self.params.have_ipc: + child_item.data[6] = PercentToOneDP(child_item.insn_cnt, self.insn_cnt) + child_item.data[8] = PercentToOneDP(child_item.cyc_cnt, self.cyc_cnt) + child_item.data[11] = PercentToOneDP(child_item.branch_count, self.branch_count) + else: + child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count) + +# Call tree data model level one item + +class CallTreeLevelOneItem(CallGraphLevelItemBase): + + def __init__(self, glb, params, row, comm_id, comm, parent_item): + super(CallTreeLevelOneItem, self).__init__(glb, params, row, parent_item) + if self.params.have_ipc: + self.data = [comm, "", "", "", "", "", "", "", "", "", "", ""] + else: + self.data = [comm, "", "", "", "", "", ""] + self.dbid = comm_id + + def Select(self): + self.query_done = True + query = QSqlQuery(self.glb.db) + QueryExec(query, "SELECT thread_id, pid, tid" + " FROM comm_threads" + " INNER JOIN threads ON thread_id = threads.id" + " WHERE comm_id = " + str(self.dbid)) + while query.next(): + child_item = CallTreeLevelTwoItem(self.glb, self.params, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self) + self.child_items.append(child_item) + self.child_count += 1 + +# Call tree data model root item + +class CallTreeRootItem(CallGraphLevelItemBase): + + def __init__(self, glb, params): + super(CallTreeRootItem, self).__init__(glb, params, 0, None) + self.dbid = 0 + self.query_done = True + if_has_calls = "" + if IsSelectable(glb.db, "comms", columns = "has_calls"): + if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE + query = QSqlQuery(glb.db) + QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls) + while query.next(): + if not query.value(0): + continue + child_item = CallTreeLevelOneItem(glb, params, self.child_count, query.value(0), query.value(1), self) + self.child_items.append(child_item) + self.child_count += 1 + +# Call Tree data model + +class CallTreeModel(CallGraphModelBase): + + def __init__(self, glb, parent=None): + super(CallTreeModel, self).__init__(glb, parent) + + def GetRoot(self): + return CallTreeRootItem(self.glb, self.params) + + def columnCount(self, parent=None): + if self.params.have_ipc: + return 12 + else: + return 7 + + def columnHeader(self, column): + if self.params.have_ipc: + headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Insn Cnt", "Insn Cnt (%)", "Cyc Cnt", "Cyc Cnt (%)", "IPC", "Branch Count ", "Branch Count (%) "] + else: + headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "] + return headers[column] + + def columnAlignment(self, column): + if self.params.have_ipc: + alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, 
Qt.AlignRight ] + else: + alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] + return alignment[column] + + def DoFindSelect(self, query, match): + QueryExec(query, "SELECT calls.id, comm_id, thread_id" + " FROM calls" + " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" + " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" + " WHERE calls.id <> 0" + " AND symbols.name" + match + + " ORDER BY comm_id, thread_id, call_time, calls.id") + + def FindPath(self, query): + # Turn the query result into a list of ids that the tree view can walk + # to open the tree at the right place. + ids = [] + parent_id = query.value(0) + while parent_id: + ids.insert(0, parent_id) + q2 = QSqlQuery(self.glb.db) + QueryExec(q2, "SELECT parent_id" + " FROM calls" + " WHERE id = " + str(parent_id)) + if not q2.next(): + break + parent_id = q2.value(0) + ids.insert(0, query.value(2)) + ids.insert(0, query.value(1)) + return ids + +# Horizontal layout + +class HBoxLayout(QHBoxLayout): + + def __init__(self, *children): + super(HBoxLayout, self).__init__() + + self.layout().setContentsMargins(0, 0, 0, 0) + for child in children: + if child.isWidgetType(): + self.layout().addWidget(child) + else: + self.layout().addLayout(child) + +# Vertical layout + +class VBoxLayout(QVBoxLayout): + + def __init__(self, *children): + super(VBoxLayout, self).__init__() + + self.layout().setContentsMargins(0, 0, 0, 0) + for child in children: + if child.isWidgetType(): + self.layout().addWidget(child) + else: + self.layout().addLayout(child) + +# Vertical layout widget + +class VBox(): + + def __init__(self, *children): + self.vbox = QWidget() + self.vbox.setLayout(VBoxLayout(*children)) + + def Widget(self): + return self.vbox + +# Tree window base + +class TreeWindowBase(QMdiSubWindow): + + def __init__(self, parent=None): + super(TreeWindowBase, self).__init__(parent) + + self.model = None + self.find_bar = None + + self.view = QTreeView() + self.view.setSelectionMode(QAbstractItemView.ContiguousSelection) + self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard + + self.context_menu = TreeContextMenu(self.view) + + def DisplayFound(self, ids): + if not len(ids): + return False + parent = QModelIndex() + for dbid in ids: + found = False + n = self.model.rowCount(parent) + for row in xrange(n): + child = self.model.index(row, 0, parent) + if child.internalPointer().dbid == dbid: + found = True + self.view.setExpanded(parent, True) + self.view.setCurrentIndex(child) + parent = child + break + if not found: + break + return found + + def Find(self, value, direction, pattern, context): + self.view.setFocus() + self.find_bar.Busy() + self.model.Find(value, direction, pattern, context, self.FindDone) + + def FindDone(self, ids): + found = True + if not self.DisplayFound(ids): + found = False + self.find_bar.Idle() + if not found: + self.find_bar.NotFound() + + +# Context-sensitive call graph window + +class CallGraphWindow(TreeWindowBase): + + def __init__(self, glb, parent=None): + super(CallGraphWindow, self).__init__(parent) + + self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x)) + + self.view.setModel(self.model) + + for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)): + self.view.setColumnWidth(c, w) + + self.find_bar = FindBar(self, self) + + self.vbox = VBox(self.view, self.find_bar.Widget()) + + self.setWidget(self.vbox.Widget()) + + AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph") +
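The call-graph window above populates its tree lazily: expanding a node runs the GROUP BY query in CallGraphLevelTwoPlusItemBase.Select() for just that node's children. The same aggregation can be reproduced directly against a database written by export-to-sqlite.py; a sketch, assuming an export file named pt_example.db and ignoring the comm/thread filters and dsos join for brevity:

    import sqlite3

    conn = sqlite3.connect("pt_example.db")       # hypothetical export file
    sql = ("SELECT call_path_id, name, COUNT(calls.id), SUM(return_time - call_time)"
           " FROM calls"
           " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
           " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
           " WHERE parent_call_path_id = 1"       # call path id 1 is the root
           " GROUP BY call_path_id, name")
    for row in conn.execute(sql):
        print(row)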
"Context-Sensitive Call Graph") + +# Call tree window + +class CallTreeWindow(TreeWindowBase): + + def __init__(self, glb, parent=None, thread_at_time=None): + super(CallTreeWindow, self).__init__(parent) + + self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x)) + + self.view.setModel(self.model) + + for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)): + self.view.setColumnWidth(c, w) + + self.find_bar = FindBar(self, self) + + self.vbox = VBox(self.view, self.find_bar.Widget()) + + self.setWidget(self.vbox.Widget()) + + AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree") + + if thread_at_time: + self.DisplayThreadAtTime(*thread_at_time) + + def DisplayThreadAtTime(self, comm_id, thread_id, time): + parent = QModelIndex() + for dbid in (comm_id, thread_id): + found = False + n = self.model.rowCount(parent) + for row in xrange(n): + child = self.model.index(row, 0, parent) + if child.internalPointer().dbid == dbid: + found = True + self.view.setExpanded(parent, True) + self.view.setCurrentIndex(child) + parent = child + break + if not found: + return + found = False + while True: + n = self.model.rowCount(parent) + if not n: + return + last_child = None + for row in xrange(n): + self.view.setExpanded(parent, True) + child = self.model.index(row, 0, parent) + child_call_time = child.internalPointer().call_time + if child_call_time < time: + last_child = child + elif child_call_time == time: + self.view.setCurrentIndex(child) + return + elif child_call_time > time: + break + if not last_child: + if not found: + child = self.model.index(0, 0, parent) + self.view.setExpanded(parent, True) + self.view.setCurrentIndex(child) + return + found = True + self.view.setExpanded(parent, True) + self.view.setCurrentIndex(last_child) + parent = last_child + +# ExecComm() gets the comm_id of the command string that was set when the process exec'd i.e. 
the program name + +def ExecComm(db, thread_id, time): + query = QSqlQuery(db) + QueryExec(query, "SELECT comm_threads.comm_id, comms.c_time, comms.exec_flag" + " FROM comm_threads" + " INNER JOIN comms ON comms.id = comm_threads.comm_id" + " WHERE comm_threads.thread_id = " + str(thread_id) + + " ORDER BY comms.c_time, comms.id") + first = None + last = None + while query.next(): + if first is None: + first = query.value(0) + if query.value(2) and Decimal(query.value(1)) <= Decimal(time): + last = query.value(0) + if not(last is None): + return last + return first + +# Container for (x, y) data + +class XY(): + def __init__(self, x=0, y=0): + self.x = x + self.y = y + + def __str__(self): + return "XY({}, {})".format(str(self.x), str(self.y)) + +# Container for sub-range data + +class Subrange(): + def __init__(self, lo=0, hi=0): + self.lo = lo + self.hi = hi + + def __str__(self): + return "Subrange({}, {})".format(str(self.lo), str(self.hi)) + +# Graph data region base class + +class GraphDataRegion(object): + + def __init__(self, key, title = "", ordinal = ""): + self.key = key + self.title = title + self.ordinal = ordinal + +# Function to sort GraphDataRegion + +def GraphDataRegionOrdinal(data_region): + return data_region.ordinal + +# Attributes for a graph region + +class GraphRegionAttribute(): + + def __init__(self, colour): + self.colour = colour + +# Switch graph data region represents a task + +class SwitchGraphDataRegion(GraphDataRegion): + + def __init__(self, key, exec_comm_id, pid, tid, comm, thread_id, comm_id): + super(SwitchGraphDataRegion, self).__init__(key) + + self.title = str(pid) + " / " + str(tid) + " " + comm + # Order graph legend within exec comm by pid / tid / time + self.ordinal = str(pid).rjust(16) + str(exec_comm_id).rjust(8) + str(tid).rjust(16) + self.exec_comm_id = exec_comm_id + self.pid = pid + self.tid = tid + self.comm = comm + self.thread_id = thread_id + self.comm_id = comm_id + +# Graph data point + +class GraphDataPoint(): + + def __init__(self, data, index, x, y, altx=None, alty=None, hregion=None, vregion=None): + self.data = data + self.index = index + self.x = x + self.y = y + self.altx = altx + self.alty = alty + self.hregion = hregion + self.vregion = vregion + +# Graph data (single graph) base class + +class GraphData(object): + + def __init__(self, collection, xbase=Decimal(0), ybase=Decimal(0)): + self.collection = collection + self.points = [] + self.xbase = xbase + self.ybase = ybase + self.title = "" + + def AddPoint(self, x, y, altx=None, alty=None, hregion=None, vregion=None): + index = len(self.points) + + x = float(Decimal(x) - self.xbase) + y = float(Decimal(y) - self.ybase) + + self.points.append(GraphDataPoint(self, index, x, y, altx, alty, hregion, vregion)) + + def XToData(self, x): + return Decimal(x) + self.xbase + + def YToData(self, y): + return Decimal(y) + self.ybase + +# Switch graph data (for one CPU) + +class SwitchGraphData(GraphData): + + def __init__(self, db, collection, cpu, xbase): + super(SwitchGraphData, self).__init__(collection, xbase) + + self.cpu = cpu + self.title = "CPU " + str(cpu) + self.SelectSwitches(db) + + def SelectComms(self, db, thread_id, last_comm_id, start_time, end_time): + query = QSqlQuery(db) + QueryExec(query, "SELECT id, c_time" + " FROM comms" + " WHERE c_thread_id = " + str(thread_id) + + " AND exec_flag = " + self.collection.glb.dbref.TRUE + + " AND c_time >= " + str(start_time) + + " AND c_time <= " + str(end_time) + + " ORDER BY c_time, id") + while query.next(): + comm_id = 
query.value(0) + if comm_id == last_comm_id: + continue + time = query.value(1) + hregion = self.HRegion(db, thread_id, comm_id, time) + self.AddPoint(time, 1000, None, None, hregion) + + def SelectSwitches(self, db): + last_time = None + last_comm_id = None + last_thread_id = None + query = QSqlQuery(db) + QueryExec(query, "SELECT time, thread_out_id, thread_in_id, comm_out_id, comm_in_id, flags" + " FROM context_switches" + " WHERE machine_id = " + str(self.collection.machine_id) + + " AND cpu = " + str(self.cpu) + + " ORDER BY time, id") + while query.next(): + flags = int(query.value(5)) + if flags & 1: + # Schedule-out: detect and add exec's + if last_thread_id == query.value(1) and last_comm_id is not None and last_comm_id != query.value(3): + self.SelectComms(db, last_thread_id, last_comm_id, last_time, query.value(0)) + continue + # Schedule-in: add data point + if len(self.points) == 0: + start_time = self.collection.glb.StartTime(self.collection.machine_id) + hregion = self.HRegion(db, query.value(1), query.value(3), start_time) + self.AddPoint(start_time, 1000, None, None, hregion) + time = query.value(0) + comm_id = query.value(4) + thread_id = query.value(2) + hregion = self.HRegion(db, thread_id, comm_id, time) + self.AddPoint(time, 1000, None, None, hregion) + last_time = time + last_comm_id = comm_id + last_thread_id = thread_id + + def NewHRegion(self, db, key, thread_id, comm_id, time): + exec_comm_id = ExecComm(db, thread_id, time) + query = QSqlQuery(db) + QueryExec(query, "SELECT pid, tid FROM threads WHERE id = " + str(thread_id)) + if query.next(): + pid = query.value(0) + tid = query.value(1) + else: + pid = -1 + tid = -1 + query = QSqlQuery(db) + QueryExec(query, "SELECT comm FROM comms WHERE id = " + str(comm_id)) + if query.next(): + comm = query.value(0) + else: + comm = "" + return SwitchGraphDataRegion(key, exec_comm_id, pid, tid, comm, thread_id, comm_id) + + def HRegion(self, db, thread_id, comm_id, time): + key = str(thread_id) + ":" + str(comm_id) + hregion = self.collection.LookupHRegion(key) + if hregion is None: + hregion = self.NewHRegion(db, key, thread_id, comm_id, time) + self.collection.AddHRegion(key, hregion) + return hregion + +# Graph data collection (multiple related graphs) base class + +class GraphDataCollection(object): + + def __init__(self, glb): + self.glb = glb + self.data = [] + self.hregions = {} + self.xrangelo = None + self.xrangehi = None + self.yrangelo = None + self.yrangehi = None + self.dp = XY(0, 0) + + def AddGraphData(self, data): + self.data.append(data) + + def LookupHRegion(self, key): + if key in self.hregions: + return self.hregions[key] + return None + + def AddHRegion(self, key, hregion): + self.hregions[key] = hregion + +# Switch graph data collection (SwitchGraphData for each CPU) + +class SwitchGraphDataCollection(GraphDataCollection): + + def __init__(self, glb, db, machine_id): + super(SwitchGraphDataCollection, self).__init__(glb) + + self.machine_id = machine_id + self.cpus = self.SelectCPUs(db) + + self.xrangelo = glb.StartTime(machine_id) + self.xrangehi = glb.FinishTime(machine_id) + + self.yrangelo = Decimal(0) + self.yrangehi = Decimal(1000) + + for cpu in self.cpus: + self.AddGraphData(SwitchGraphData(db, self, cpu, self.xrangelo)) + + def SelectCPUs(self, db): + cpus = [] + query = QSqlQuery(db) + QueryExec(query, "SELECT DISTINCT cpu" + " FROM context_switches" + " WHERE machine_id = " + str(self.machine_id)) + while query.next(): + cpus.append(int(query.value(0))) + return sorted(cpus) + +# Switch 
graph data graphics item displays the graphed data + +class SwitchGraphDataGraphicsItem(QGraphicsItem): + + def __init__(self, data, graph_width, graph_height, attrs, event_handler, parent=None): + super(SwitchGraphDataGraphicsItem, self).__init__(parent) + + self.data = data + self.graph_width = graph_width + self.graph_height = graph_height + self.attrs = attrs + self.event_handler = event_handler + self.setAcceptHoverEvents(True) + + def boundingRect(self): + return QRectF(0, 0, self.graph_width, self.graph_height) + + def PaintPoint(self, painter, last, x): + if not(last is None or last.hregion.pid == 0 or x < self.attrs.subrange.x.lo): + if last.x < self.attrs.subrange.x.lo: + x0 = self.attrs.subrange.x.lo + else: + x0 = last.x + if x > self.attrs.subrange.x.hi: + x1 = self.attrs.subrange.x.hi + else: + x1 = x - 1 + x0 = self.attrs.XToPixel(x0) + x1 = self.attrs.XToPixel(x1) + + y0 = self.attrs.YToPixel(last.y) + + colour = self.attrs.region_attributes[last.hregion.key].colour + + width = x1 - x0 + 1 + if width < 2: + painter.setPen(colour) + painter.drawLine(x0, self.graph_height - y0, x0, self.graph_height) + else: + painter.fillRect(x0, self.graph_height - y0, width, self.graph_height - 1, colour) + + def paint(self, painter, option, widget): + last = None + for point in self.data.points: + self.PaintPoint(painter, last, point.x) + if point.x > self.attrs.subrange.x.hi: + break; + last = point + self.PaintPoint(painter, last, self.attrs.subrange.x.hi + 1) + + def BinarySearchPoint(self, target): + lower_pos = 0 + higher_pos = len(self.data.points) + while True: + pos = int((lower_pos + higher_pos) / 2) + val = self.data.points[pos].x + if target >= val: + lower_pos = pos + else: + higher_pos = pos + if higher_pos <= lower_pos + 1: + return lower_pos + + def XPixelToData(self, x): + x = self.attrs.PixelToX(x) + if x < self.data.points[0].x: + x = 0 + pos = 0 + low = True + else: + pos = self.BinarySearchPoint(x) + low = False + return (low, pos, self.data.XToData(x)) + + def EventToData(self, event): + no_data = (None,) * 4 + if len(self.data.points) < 1: + return no_data + x = event.pos().x() + if x < 0: + return no_data + low0, pos0, time_from = self.XPixelToData(x) + low1, pos1, time_to = self.XPixelToData(x + 1) + hregions = set() + hregion_times = [] + if not low1: + for i in xrange(pos0, pos1 + 1): + hregion = self.data.points[i].hregion + hregions.add(hregion) + if i == pos0: + time = time_from + else: + time = self.data.XToData(self.data.points[i].x) + hregion_times.append((hregion, time)) + return (time_from, time_to, hregions, hregion_times) + + def hoverMoveEvent(self, event): + time_from, time_to, hregions, hregion_times = self.EventToData(event) + if time_from is not None: + self.event_handler.PointEvent(self.data.cpu, time_from, time_to, hregions) + + def hoverLeaveEvent(self, event): + self.event_handler.NoPointEvent() + + def mousePressEvent(self, event): + if event.button() != Qt.RightButton: + super(SwitchGraphDataGraphicsItem, self).mousePressEvent(event) + return + time_from, time_to, hregions, hregion_times = self.EventToData(event) + if hregion_times: + self.event_handler.RightClickEvent(self.data.cpu, hregion_times, event.screenPos()) + +# X-axis graphics item + +class XAxisGraphicsItem(QGraphicsItem): + + def __init__(self, width, parent=None): + super(XAxisGraphicsItem, self).__init__(parent) + + self.width = width + self.max_mark_sz = 4 + self.height = self.max_mark_sz + 1 + + def boundingRect(self): + return QRectF(0, 0, self.width, self.height) + + def 
Step(self): + attrs = self.parentItem().attrs + subrange = attrs.subrange.x + t = subrange.hi - subrange.lo + s = (3.0 * t) / self.width + n = 1.0 + while s > n: + n = n * 10.0 + return n + + def PaintMarks(self, painter, at_y, lo, hi, step, i): + attrs = self.parentItem().attrs + x = lo + while x <= hi: + xp = attrs.XToPixel(x) + if i % 10: + if i % 5: + sz = 1 + else: + sz = 2 + else: + sz = self.max_mark_sz + i = 0 + painter.drawLine(xp, at_y, xp, at_y + sz) + x += step + i += 1 + + def paint(self, painter, option, widget): + # Using QPainter::drawLine(int x1, int y1, int x2, int y2) so x2 = width -1 + painter.drawLine(0, 0, self.width - 1, 0) + n = self.Step() + attrs = self.parentItem().attrs + subrange = attrs.subrange.x + if subrange.lo: + x_offset = n - (subrange.lo % n) + else: + x_offset = 0.0 + x = subrange.lo + x_offset + i = (x / n) % 10 + self.PaintMarks(painter, 0, x, subrange.hi, n, i) + + def ScaleDimensions(self): + n = self.Step() + attrs = self.parentItem().attrs + lo = attrs.subrange.x.lo + hi = (n * 10.0) + lo + width = attrs.XToPixel(hi) + if width > 500: + width = 0 + return (n, lo, hi, width) + + def PaintScale(self, painter, at_x, at_y): + n, lo, hi, width = self.ScaleDimensions() + if not width: + return + painter.drawLine(at_x, at_y, at_x + width, at_y) + self.PaintMarks(painter, at_y, lo, hi, n, 0) + + def ScaleWidth(self): + n, lo, hi, width = self.ScaleDimensions() + return width + + def ScaleHeight(self): + return self.height + + def ScaleUnit(self): + return self.Step() * 10 + +# Scale graphics item base class + +class ScaleGraphicsItem(QGraphicsItem): + + def __init__(self, axis, parent=None): + super(ScaleGraphicsItem, self).__init__(parent) + self.axis = axis + + def boundingRect(self): + scale_width = self.axis.ScaleWidth() + if not scale_width: + return QRectF() + return QRectF(0, 0, self.axis.ScaleWidth() + 100, self.axis.ScaleHeight()) + + def paint(self, painter, option, widget): + scale_width = self.axis.ScaleWidth() + if not scale_width: + return + self.axis.PaintScale(painter, 0, 5) + x = scale_width + 4 + painter.drawText(QPointF(x, 10), self.Text()) + + def Unit(self): + return self.axis.ScaleUnit() + + def Text(self): + return "" + +# Switch graph scale graphics item + +class SwitchScaleGraphicsItem(ScaleGraphicsItem): + + def __init__(self, axis, parent=None): + super(SwitchScaleGraphicsItem, self).__init__(axis, parent) + + def Text(self): + unit = self.Unit() + if unit >= 1000000000: + unit = int(unit / 1000000000) + us = "s" + elif unit >= 1000000: + unit = int(unit / 1000000) + us = "ms" + elif unit >= 1000: + unit = int(unit / 1000) + us = "us" + else: + unit = int(unit) + us = "ns" + return " = " + str(unit) + " " + us + +# Switch graph graphics item contains graph title, scale, x/y-axis, and the graphed data + +class SwitchGraphGraphicsItem(QGraphicsItem): + + def __init__(self, collection, data, attrs, event_handler, first, parent=None): + super(SwitchGraphGraphicsItem, self).__init__(parent) + self.collection = collection + self.data = data + self.attrs = attrs + self.event_handler = event_handler + + margin = 20 + title_width = 50 + + self.title_graphics = QGraphicsSimpleTextItem(data.title, self) + + self.title_graphics.setPos(margin, margin) + graph_width = attrs.XToPixel(attrs.subrange.x.hi) + 1 + graph_height = attrs.YToPixel(attrs.subrange.y.hi) + 1 + + self.graph_origin_x = margin + title_width + margin + self.graph_origin_y = graph_height + margin + + x_axis_size = 1 + y_axis_size = 1 + self.yline = QGraphicsLineItem(0, 0, 0, 
graph_height, self) + + self.x_axis = XAxisGraphicsItem(graph_width, self) + self.x_axis.setPos(self.graph_origin_x, self.graph_origin_y + 1) + + if first: + self.scale_item = SwitchScaleGraphicsItem(self.x_axis, self) + self.scale_item.setPos(self.graph_origin_x, self.graph_origin_y + 10) + + self.yline.setPos(self.graph_origin_x - y_axis_size, self.graph_origin_y - graph_height) + + self.axis_point = QGraphicsLineItem(0, 0, 0, 0, self) + self.axis_point.setPos(self.graph_origin_x - 1, self.graph_origin_y +1) + + self.width = self.graph_origin_x + graph_width + margin + self.height = self.graph_origin_y + margin + + self.graph = SwitchGraphDataGraphicsItem(data, graph_width, graph_height, attrs, event_handler, self) + self.graph.setPos(self.graph_origin_x, self.graph_origin_y - graph_height) + + if parent and 'EnableRubberBand' in dir(parent): + parent.EnableRubberBand(self.graph_origin_x, self.graph_origin_x + graph_width - 1, self) + + def boundingRect(self): + return QRectF(0, 0, self.width, self.height) + + def paint(self, painter, option, widget): + pass + + def RBXToPixel(self, x): + return self.attrs.PixelToX(x - self.graph_origin_x) + + def RBXRangeToPixel(self, x0, x1): + return (self.RBXToPixel(x0), self.RBXToPixel(x1 + 1)) + + def RBPixelToTime(self, x): + if x < self.data.points[0].x: + return self.data.XToData(0) + return self.data.XToData(x) + + def RBEventTimes(self, x0, x1): + x0, x1 = self.RBXRangeToPixel(x0, x1) + time_from = self.RBPixelToTime(x0) + time_to = self.RBPixelToTime(x1) + return (time_from, time_to) + + def RBEvent(self, x0, x1): + time_from, time_to = self.RBEventTimes(x0, x1) + self.event_handler.RangeEvent(time_from, time_to) + + def RBMoveEvent(self, x0, x1): + if x1 < x0: + x0, x1 = x1, x0 + self.RBEvent(x0, x1) + + def RBReleaseEvent(self, x0, x1, selection_state): + if x1 < x0: + x0, x1 = x1, x0 + x0, x1 = self.RBXRangeToPixel(x0, x1) + self.event_handler.SelectEvent(x0, x1, selection_state) + +# Graphics item to draw a vertical bracket (used to highlight "forward" sub-range) + +class VerticalBracketGraphicsItem(QGraphicsItem): + + def __init__(self, parent=None): + super(VerticalBracketGraphicsItem, self).__init__(parent) + + self.width = 0 + self.height = 0 + self.hide() + + def SetSize(self, width, height): + self.width = width + 1 + self.height = height + 1 + + def boundingRect(self): + return QRectF(0, 0, self.width, self.height) + + def paint(self, painter, option, widget): + colour = QColor(255, 255, 0, 32) + painter.fillRect(0, 0, self.width, self.height, colour) + x1 = self.width - 1 + y1 = self.height - 1 + painter.drawLine(0, 0, x1, 0) + painter.drawLine(0, 0, 0, 3) + painter.drawLine(x1, 0, x1, 3) + painter.drawLine(0, y1, x1, y1) + painter.drawLine(0, y1, 0, y1 - 3) + painter.drawLine(x1, y1, x1, y1 - 3) + +# Graphics item to contain graphs arranged vertically + +class VertcalGraphSetGraphicsItem(QGraphicsItem): + + def __init__(self, collection, attrs, event_handler, child_class, parent=None): + super(VertcalGraphSetGraphicsItem, self).__init__(parent) + + self.collection = collection + + self.top = 10 + + self.width = 0 + self.height = self.top + + self.rubber_band = None + self.rb_enabled = False + + first = True + for data in collection.data: + child = child_class(collection, data, attrs, event_handler, first, self) + child.setPos(0, self.height + 1) + rect = child.boundingRect() + if rect.right() > self.width: + self.width = rect.right() + self.height = self.height + rect.bottom() + 1 + first = False + + self.bracket = 
VerticalBracketGraphicsItem(self) + + def EnableRubberBand(self, xlo, xhi, rb_event_handler): + if self.rb_enabled: + return + self.rb_enabled = True + self.rb_in_view = False + self.setAcceptedMouseButtons(Qt.LeftButton) + self.rb_xlo = xlo + self.rb_xhi = xhi + self.rb_event_handler = rb_event_handler + self.mousePressEvent = self.MousePressEvent + self.mouseMoveEvent = self.MouseMoveEvent + self.mouseReleaseEvent = self.MouseReleaseEvent + + def boundingRect(self): + return QRectF(0, 0, self.width, self.height) + + def paint(self, painter, option, widget): + pass + + def RubberBandParent(self): + scene = self.scene() + view = scene.views()[0] + viewport = view.viewport() + return viewport + + def RubberBandSetGeometry(self, rect): + scene_rectf = self.mapRectToScene(QRectF(rect)) + scene = self.scene() + view = scene.views()[0] + poly = view.mapFromScene(scene_rectf) + self.rubber_band.setGeometry(poly.boundingRect()) + + def SetSelection(self, selection_state): + if self.rubber_band: + if selection_state: + self.RubberBandSetGeometry(selection_state) + self.rubber_band.show() + else: + self.rubber_band.hide() + + def SetBracket(self, rect): + if rect: + x, y, width, height = rect.x(), rect.y(), rect.width(), rect.height() + self.bracket.setPos(x, y) + self.bracket.SetSize(width, height) + self.bracket.show() + else: + self.bracket.hide() + + def RubberBandX(self, event): + x = event.pos().toPoint().x() + if x < self.rb_xlo: + x = self.rb_xlo + elif x > self.rb_xhi: + x = self.rb_xhi + else: + self.rb_in_view = True + return x + + def RubberBandRect(self, x): + if self.rb_origin.x() <= x: + width = x - self.rb_origin.x() + rect = QRect(self.rb_origin, QSize(width, self.height)) + else: + width = self.rb_origin.x() - x + top_left = QPoint(self.rb_origin.x() - width, self.rb_origin.y()) + rect = QRect(top_left, QSize(width, self.height)) + return rect + + def MousePressEvent(self, event): + self.rb_in_view = False + x = self.RubberBandX(event) + self.rb_origin = QPoint(x, self.top) + if self.rubber_band is None: + self.rubber_band = QRubberBand(QRubberBand.Rectangle, self.RubberBandParent()) + self.RubberBandSetGeometry(QRect(self.rb_origin, QSize(0, self.height))) + if self.rb_in_view: + self.rubber_band.show() + self.rb_event_handler.RBMoveEvent(x, x) + else: + self.rubber_band.hide() + + def MouseMoveEvent(self, event): + x = self.RubberBandX(event) + rect = self.RubberBandRect(x) + self.RubberBandSetGeometry(rect) + if self.rb_in_view: + self.rubber_band.show() + self.rb_event_handler.RBMoveEvent(self.rb_origin.x(), x) + + def MouseReleaseEvent(self, event): + x = self.RubberBandX(event) + if self.rb_in_view: + selection_state = self.RubberBandRect(x) + else: + selection_state = None + self.rb_event_handler.RBReleaseEvent(self.rb_origin.x(), x, selection_state) + +# Switch graph legend data model + +class SwitchGraphLegendModel(QAbstractTableModel): + + def __init__(self, collection, region_attributes, parent=None): + super(SwitchGraphLegendModel, self).__init__(parent) + + self.region_attributes = region_attributes + + self.child_items = sorted(collection.hregions.values(), key=GraphDataRegionOrdinal) + self.child_count = len(self.child_items) + + self.highlight_set = set() + + self.column_headers = ("pid", "tid", "comm") + + def rowCount(self, parent): + return self.child_count + + def headerData(self, section, orientation, role): + if role != Qt.DisplayRole: + return None + if orientation != Qt.Horizontal: + return None + return self.columnHeader(section) + + def index(self, row, 
column, parent): + return self.createIndex(row, column, self.child_items[row]) + + def columnCount(self, parent=None): + return len(self.column_headers) + + def columnHeader(self, column): + return self.column_headers[column] + + def data(self, index, role): + if role == Qt.BackgroundRole: + child = self.child_items[index.row()] + if child in self.highlight_set: + return self.region_attributes[child.key].colour + return None + if role == Qt.ForegroundRole: + child = self.child_items[index.row()] + if child in self.highlight_set: + return QColor(255, 255, 255) + return self.region_attributes[child.key].colour + if role != Qt.DisplayRole: + return None + hregion = self.child_items[index.row()] + col = index.column() + if col == 0: + return hregion.pid + if col == 1: + return hregion.tid + if col == 2: + return hregion.comm + return None + + def SetHighlight(self, row, set_highlight): + child = self.child_items[row] + top_left = self.createIndex(row, 0, child) + bottom_right = self.createIndex(row, len(self.column_headers) - 1, child) + self.dataChanged.emit(top_left, bottom_right) + + def Highlight(self, highlight_set): + for row in xrange(self.child_count): + child = self.child_items[row] + if child in self.highlight_set: + if child not in highlight_set: + self.SetHighlight(row, False) + elif child in highlight_set: + self.SetHighlight(row, True) + self.highlight_set = highlight_set + +# Switch graph legend is a table + +class SwitchGraphLegend(QWidget): + + def __init__(self, collection, region_attributes, parent=None): + super(SwitchGraphLegend, self).__init__(parent) + + self.data_model = SwitchGraphLegendModel(collection, region_attributes) + + self.model = QSortFilterProxyModel() + self.model.setSourceModel(self.data_model) + + self.view = QTableView() + self.view.setModel(self.model) + self.view.setEditTriggers(QAbstractItemView.NoEditTriggers) + self.view.verticalHeader().setVisible(False) + self.view.sortByColumn(-1, Qt.AscendingOrder) + self.view.setSortingEnabled(True) + self.view.resizeColumnsToContents() + self.view.resizeRowsToContents() + + self.vbox = VBoxLayout(self.view) + self.setLayout(self.vbox) + + sz1 = self.view.columnWidth(0) + self.view.columnWidth(1) + self.view.columnWidth(2) + 2 + sz1 = sz1 + self.view.verticalScrollBar().sizeHint().width() + self.saved_size = sz1 + + def resizeEvent(self, event): + self.saved_size = self.size().width() + super(SwitchGraphLegend, self).resizeEvent(event) + + def Highlight(self, highlight_set): + self.data_model.Highlight(highlight_set) + self.update() + + def changeEvent(self, event): + if event.type() == QEvent.FontChange: + self.view.resizeRowsToContents() + self.view.resizeColumnsToContents() + # Need to resize rows again after column resize + self.view.resizeRowsToContents() + super(SwitchGraphLegend, self).changeEvent(event) + +# Random colour generation + +def RGBColourTooLight(r, g, b): + if g > 230: + return True + if g <= 160: + return False + if r <= 180 and g <= 180: + return False + if r < 60: + return False + return True + +def GenerateColours(x): + cs = [0] + for i in xrange(1, x): + cs.append(int((255.0 / i) + 0.5)) + colours = [] + for r in cs: + for g in cs: + for b in cs: + # Exclude black and colours that look too light against a white background + if (r, g, b) == (0, 0, 0) or RGBColourTooLight(r, g, b): + continue + colours.append(QColor(r, g, b)) + return colours + +def GenerateNColours(n): + for x in xrange(2, n + 2): + colours = GenerateColours(x) + if len(colours) >= n: + return colours + return [] + +def 
GenerateNRandomColours(n, seed):
+	colours = GenerateNColours(n)
+	random.seed(seed)
+	random.shuffle(colours)
+	return colours
+
+# Graph attributes, in particular the scale and subrange that change when zooming
+
+class GraphAttributes():
+
+	def __init__(self, scale, subrange, region_attributes, dp):
+		self.scale = scale
+		self.subrange = subrange
+		self.region_attributes = region_attributes
+		# Rounding avoids errors due to finite floating point precision
+		self.dp = dp	# data decimal places
+		self.Update()
+
+	def XToPixel(self, x):
+		return int(round((x - self.subrange.x.lo) * self.scale.x, self.pdp.x))
+
+	def YToPixel(self, y):
+		return int(round((y - self.subrange.y.lo) * self.scale.y, self.pdp.y))
+
+	def PixelToXRounded(self, px):
+		return round((round(px, 0) / self.scale.x), self.dp.x) + self.subrange.x.lo
+
+	def PixelToYRounded(self, py):
+		return round((round(py, 0) / self.scale.y), self.dp.y) + self.subrange.y.lo
+
+	def PixelToX(self, px):
+		x = self.PixelToXRounded(px)
+		if self.pdp.x == 0:
+			rt = self.XToPixel(x)
+			if rt > px:
+				return x - 1
+		return x
+
+	def PixelToY(self, py):
+		y = self.PixelToYRounded(py)
+		if self.pdp.y == 0:
+			rt = self.YToPixel(y)
+			if rt > py:
+				return y - 1
+		return y
+
+	def ToPDP(self, dp, scale):
+		# Calculate pixel decimal places:
+		#  (10 ** dp) is the minimum delta in the data
+		#  scale it to get the minimum delta in pixels
+		#  log10 gives the number of decimal places negatively
+		#  subtract 1 to divide by 10
+		#  round to the lower negative number
+		#  change the sign to get the number of decimal places positively
+		x = math.log10((10 ** dp) * scale)
+		if x < 0:
+			x -= 1
+			x = -int(math.floor(x) - 0.1)
+		else:
+			x = 0
+		return x
+
+	def Update(self):
+		x = self.ToPDP(self.dp.x, self.scale.x)
+		y = self.ToPDP(self.dp.y, self.scale.y)
+		self.pdp = XY(x, y)	# pixel decimal places
+
+# Switch graph splitter which divides the CPU graphs from the legend
+
+class SwitchGraphSplitter(QSplitter):
+
+	def __init__(self, parent=None):
+		super(SwitchGraphSplitter, self).__init__(parent)
+
+		self.first_time = False
+
+	def resizeEvent(self, ev):
+		if self.first_time:
+			self.first_time = False
+			sz1 = self.widget(1).view.columnWidth(0) + self.widget(1).view.columnWidth(1) + self.widget(1).view.columnWidth(2) + 2
+			sz1 = sz1 + self.widget(1).view.verticalScrollBar().sizeHint().width()
+			sz0 = self.size().width() - self.handleWidth() - sz1
+			self.setSizes([sz0, sz1])
+		elif not(self.widget(1).saved_size is None):
+			sz1 = self.widget(1).saved_size
+			sz0 = self.size().width() - self.handleWidth() - sz1
+			self.setSizes([sz0, sz1])
+		super(SwitchGraphSplitter, self).resizeEvent(ev)
+
+# Graph widget base class
+
+class GraphWidget(QWidget):
+
+	graph_title_changed = Signal(object)
+
+	def __init__(self, parent=None):
+		super(GraphWidget, self).__init__(parent)
+
+	def GraphTitleChanged(self, title):
+		self.graph_title_changed.emit(title)
+
+	def Title(self):
+		return ""
+
+# Display time in s, ms, us or ns
+
+def ToTimeStr(val):
+	val = Decimal(val)
+	if val >= 1000000000:
+		return "{} s".format((val / 1000000000).quantize(Decimal("0.000000001")))
+	if val >= 1000000:
+		return "{} ms".format((val / 1000000).quantize(Decimal("0.000001")))
+	if val >= 1000:
+		return "{} us".format((val / 1000).quantize(Decimal("0.001")))
+	return "{} ns".format(val.quantize(Decimal("1")))
+
+# Switch (i.e. context switch i.e.
Time Chart by CPU) graph widget which contains the CPU graphs and the legend and control buttons + +class SwitchGraphWidget(GraphWidget): + + def __init__(self, glb, collection, parent=None): + super(SwitchGraphWidget, self).__init__(parent) + + self.glb = glb + self.collection = collection + + self.back_state = [] + self.forward_state = [] + self.selection_state = (None, None) + self.fwd_rect = None + self.start_time = self.glb.StartTime(collection.machine_id) + + i = 0 + hregions = collection.hregions.values() + colours = GenerateNRandomColours(len(hregions), 1013) + region_attributes = {} + for hregion in hregions: + if hregion.pid == 0 and hregion.tid == 0: + region_attributes[hregion.key] = GraphRegionAttribute(QColor(0, 0, 0)) + else: + region_attributes[hregion.key] = GraphRegionAttribute(colours[i]) + i = i + 1 + + # Default to entire range + xsubrange = Subrange(0.0, float(collection.xrangehi - collection.xrangelo) + 1.0) + ysubrange = Subrange(0.0, float(collection.yrangehi - collection.yrangelo) + 1.0) + subrange = XY(xsubrange, ysubrange) + + scale = self.GetScaleForRange(subrange) + + self.attrs = GraphAttributes(scale, subrange, region_attributes, collection.dp) + + self.item = VertcalGraphSetGraphicsItem(collection, self.attrs, self, SwitchGraphGraphicsItem) + + self.scene = QGraphicsScene() + self.scene.addItem(self.item) + + self.view = QGraphicsView(self.scene) + self.view.centerOn(0, 0) + self.view.setAlignment(Qt.AlignLeft | Qt.AlignTop) + + self.legend = SwitchGraphLegend(collection, region_attributes) + + self.splitter = SwitchGraphSplitter() + self.splitter.addWidget(self.view) + self.splitter.addWidget(self.legend) + + self.point_label = QLabel("") + self.point_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed) + + self.back_button = QToolButton() + self.back_button.setIcon(self.style().standardIcon(QStyle.SP_ArrowLeft)) + self.back_button.setDisabled(True) + self.back_button.released.connect(lambda: self.Back()) + + self.forward_button = QToolButton() + self.forward_button.setIcon(self.style().standardIcon(QStyle.SP_ArrowRight)) + self.forward_button.setDisabled(True) + self.forward_button.released.connect(lambda: self.Forward()) + + self.zoom_button = QToolButton() + self.zoom_button.setText("Zoom") + self.zoom_button.setDisabled(True) + self.zoom_button.released.connect(lambda: self.Zoom()) + + self.hbox = HBoxLayout(self.back_button, self.forward_button, self.zoom_button, self.point_label) + + self.vbox = VBoxLayout(self.splitter, self.hbox) + + self.setLayout(self.vbox) + + def GetScaleForRangeX(self, xsubrange): + # Default graph 1000 pixels wide + dflt = 1000.0 + r = xsubrange.hi - xsubrange.lo + return dflt / r + + def GetScaleForRangeY(self, ysubrange): + # Default graph 50 pixels high + dflt = 50.0 + r = ysubrange.hi - ysubrange.lo + return dflt / r + + def GetScaleForRange(self, subrange): + # Default graph 1000 pixels wide, 50 pixels high + xscale = self.GetScaleForRangeX(subrange.x) + yscale = self.GetScaleForRangeY(subrange.y) + return XY(xscale, yscale) + + def PointEvent(self, cpu, time_from, time_to, hregions): + text = "CPU: " + str(cpu) + time_from = time_from.quantize(Decimal(1)) + rel_time_from = time_from - self.glb.StartTime(self.collection.machine_id) + text = text + " Time: " + str(time_from) + " (+" + ToTimeStr(rel_time_from) + ")" + self.point_label.setText(text) + self.legend.Highlight(hregions) + + def RightClickEvent(self, cpu, hregion_times, pos): + if not IsSelectable(self.glb.db, "calls", "WHERE parent_id >= 0"): + return 
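+		# The menu loop below binds each action's argument with a lambda
+		# default (args=thread_at_time) rather than a free closure variable.
+		# A bare closure would capture the loop variable itself, so every
+		# action would see the value from the final iteration.  A minimal
+		# sketch of the difference (illustrative values only):
+		#   fns = [lambda: n for n in range(3)]      # each call returns 2
+		#   fns = [lambda n=n: n for n in range(3)]  # calls return 0, 1, 2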
+ menu = QMenu(self.view) + for hregion, time in hregion_times: + thread_at_time = (hregion.exec_comm_id, hregion.thread_id, time) + menu_text = "Show Call Tree for {} {}:{} at {}".format(hregion.comm, hregion.pid, hregion.tid, time) + menu.addAction(CreateAction(menu_text, "Show Call Tree", lambda a=None, args=thread_at_time: self.RightClickSelect(args), self.view)) + menu.exec_(pos) + + def RightClickSelect(self, args): + CallTreeWindow(self.glb, self.glb.mainwindow, thread_at_time=args) + + def NoPointEvent(self): + self.point_label.setText("") + self.legend.Highlight({}) + + def RangeEvent(self, time_from, time_to): + time_from = time_from.quantize(Decimal(1)) + time_to = time_to.quantize(Decimal(1)) + if time_to <= time_from: + self.point_label.setText("") + return + rel_time_from = time_from - self.start_time + rel_time_to = time_to - self.start_time + text = " Time: " + str(time_from) + " (+" + ToTimeStr(rel_time_from) + ") to: " + str(time_to) + " (+" + ToTimeStr(rel_time_to) + ")" + text = text + " duration: " + ToTimeStr(time_to - time_from) + self.point_label.setText(text) + + def BackState(self): + return (self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect) + + def PushBackState(self): + state = copy.deepcopy(self.BackState()) + self.back_state.append(state) + self.back_button.setEnabled(True) + + def PopBackState(self): + self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect = self.back_state.pop() + self.attrs.Update() + if not self.back_state: + self.back_button.setDisabled(True) + + def PushForwardState(self): + state = copy.deepcopy(self.BackState()) + self.forward_state.append(state) + self.forward_button.setEnabled(True) + + def PopForwardState(self): + self.attrs.subrange, self.attrs.scale, self.selection_state, self.fwd_rect = self.forward_state.pop() + self.attrs.Update() + if not self.forward_state: + self.forward_button.setDisabled(True) + + def Title(self): + time_from = self.collection.xrangelo + Decimal(self.attrs.subrange.x.lo) + time_to = self.collection.xrangelo + Decimal(self.attrs.subrange.x.hi) + rel_time_from = time_from - self.start_time + rel_time_to = time_to - self.start_time + title = "+" + ToTimeStr(rel_time_from) + " to +" + ToTimeStr(rel_time_to) + title = title + " (" + ToTimeStr(time_to - time_from) + ")" + return title + + def Update(self): + selected_subrange, selection_state = self.selection_state + self.item.SetSelection(selection_state) + self.item.SetBracket(self.fwd_rect) + self.zoom_button.setDisabled(selected_subrange is None) + self.GraphTitleChanged(self.Title()) + self.item.update(self.item.boundingRect()) + + def Back(self): + if not self.back_state: + return + self.PushForwardState() + self.PopBackState() + self.Update() + + def Forward(self): + if not self.forward_state: + return + self.PushBackState() + self.PopForwardState() + self.Update() + + def SelectEvent(self, x0, x1, selection_state): + if selection_state is None: + selected_subrange = None + else: + if x1 - x0 < 1.0: + x1 += 1.0 + selected_subrange = Subrange(x0, x1) + self.selection_state = (selected_subrange, selection_state) + self.zoom_button.setDisabled(selected_subrange is None) + + def Zoom(self): + selected_subrange, selection_state = self.selection_state + if selected_subrange is None: + return + self.fwd_rect = selection_state + self.item.SetSelection(None) + self.PushBackState() + self.attrs.subrange.x = selected_subrange + self.forward_state = [] + self.forward_button.setDisabled(True) + self.selection_state = 
(None, None) + self.fwd_rect = None + self.attrs.scale.x = self.GetScaleForRangeX(self.attrs.subrange.x) + self.attrs.Update() + self.Update() + +# Slow initialization - perform non-GUI initialization in a separate thread and put up a modal message box while waiting + +class SlowInitClass(): + + def __init__(self, glb, title, init_fn): + self.init_fn = init_fn + self.done = False + self.result = None + + self.msg_box = QMessageBox(glb.mainwindow) + self.msg_box.setText("Initializing " + title + ". Please wait.") + self.msg_box.setWindowTitle("Initializing " + title) + self.msg_box.setWindowIcon(glb.mainwindow.style().standardIcon(QStyle.SP_MessageBoxInformation)) + + self.init_thread = Thread(self.ThreadFn, glb) + self.init_thread.done.connect(lambda: self.Done(), Qt.QueuedConnection) + + self.init_thread.start() + + def Done(self): + self.msg_box.done(0) + + def ThreadFn(self, glb): + conn_name = "SlowInitClass" + str(os.getpid()) + db, dbname = glb.dbref.Open(conn_name) + self.result = self.init_fn(db) + self.done = True + return (True, 0) + + def Result(self): + while not self.done: + self.msg_box.exec_() + self.init_thread.wait() + return self.result + +def SlowInit(glb, title, init_fn): + init = SlowInitClass(glb, title, init_fn) + return init.Result() + +# Time chart by CPU window + +class TimeChartByCPUWindow(QMdiSubWindow): + + def __init__(self, glb, parent=None): + super(TimeChartByCPUWindow, self).__init__(parent) + + self.glb = glb + self.machine_id = glb.HostMachineId() + self.collection_name = "SwitchGraphDataCollection " + str(self.machine_id) + + collection = LookupModel(self.collection_name) + if collection is None: + collection = SlowInit(glb, "Time Chart", self.Init) + + self.widget = SwitchGraphWidget(glb, collection, self) + self.view = self.widget + + self.base_title = "Time Chart by CPU" + self.setWindowTitle(self.base_title + self.widget.Title()) + self.widget.graph_title_changed.connect(self.GraphTitleChanged) + + self.setWidget(self.widget) + + AddSubWindow(glb.mainwindow.mdi_area, self, self.windowTitle()) + + def Init(self, db): + return LookupCreateModel(self.collection_name, lambda : SwitchGraphDataCollection(self.glb, db, self.machine_id)) + + def GraphTitleChanged(self, title): + self.setWindowTitle(self.base_title + " : " + title) + +# Child data item finder + +class ChildDataItemFinder(): + + def __init__(self, root): + self.root = root + self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (None,) * 5 + self.rows = [] + self.pos = 0 + + def FindSelect(self): + self.rows = [] + if self.pattern: + pattern = re.compile(self.value) + for child in self.root.child_items: + for column_data in child.data: + if re.search(pattern, str(column_data)) is not None: + self.rows.append(child.row) + break + else: + for child in self.root.child_items: + for column_data in child.data: + if self.value in str(column_data): + self.rows.append(child.row) + break + + def FindValue(self): + self.pos = 0 + if self.last_value != self.value or self.pattern != self.last_pattern: + self.FindSelect() + if not len(self.rows): + return -1 + return self.rows[self.pos] + + def FindThread(self): + if self.direction == 0 or self.value != self.last_value or self.pattern != self.last_pattern: + row = self.FindValue() + elif len(self.rows): + if self.direction > 0: + self.pos += 1 + if self.pos >= len(self.rows): + self.pos = 0 + else: + self.pos -= 1 + if self.pos < 0: + self.pos = len(self.rows) - 1 + row = self.rows[self.pos] + else: + row = -1 + return (True, row) 
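+	# FindThread() above cycles through the matching rows with wrap-around.
+	# A worked example with assumed matches at rows [2, 5, 9]: starting from
+	# row 2, direction > 0 visits rows 5 and 9 and then wraps back to row 2,
+	# while direction < 0 steps the other way and wraps from row 2 to row 9.
+	# Find() below runs the search on a worker thread so that a large model
+	# does not block the UI.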
+ + def Find(self, value, direction, pattern, context, callback): + self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (value, direction,pattern, self.value, self.pattern) + # Use a thread so the UI is not blocked + thread = Thread(self.FindThread) + thread.done.connect(lambda row, t=thread, c=callback: self.FindDone(t, c, row), Qt.QueuedConnection) + thread.start() + + def FindDone(self, thread, callback, row): + callback(row) + +# Number of database records to fetch in one go + +glb_chunk_sz = 10000 + +# Background process for SQL data fetcher + +class SQLFetcherProcess(): + + def __init__(self, dbref, sql, buffer, head, tail, fetch_count, fetching_done, process_target, wait_event, fetched_event, prep): + # Need a unique connection name + conn_name = "SQLFetcher" + str(os.getpid()) + self.db, dbname = dbref.Open(conn_name) + self.sql = sql + self.buffer = buffer + self.head = head + self.tail = tail + self.fetch_count = fetch_count + self.fetching_done = fetching_done + self.process_target = process_target + self.wait_event = wait_event + self.fetched_event = fetched_event + self.prep = prep + self.query = QSqlQuery(self.db) + self.query_limit = 0 if "$$last_id$$" in sql else 2 + self.last_id = -1 + self.fetched = 0 + self.more = True + self.local_head = self.head.value + self.local_tail = self.tail.value + + def Select(self): + if self.query_limit: + if self.query_limit == 1: + return + self.query_limit -= 1 + stmt = self.sql.replace("$$last_id$$", str(self.last_id)) + QueryExec(self.query, stmt) + + def Next(self): + if not self.query.next(): + self.Select() + if not self.query.next(): + return None + self.last_id = self.query.value(0) + return self.prep(self.query) + + def WaitForTarget(self): + while True: + self.wait_event.clear() + target = self.process_target.value + if target > self.fetched or target < 0: + break + self.wait_event.wait() + return target + + def HasSpace(self, sz): + if self.local_tail <= self.local_head: + space = len(self.buffer) - self.local_head + if space > sz: + return True + if space >= glb_nsz: + # Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer + nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL) + self.buffer[self.local_head : self.local_head + len(nd)] = nd + self.local_head = 0 + if self.local_tail - self.local_head > sz: + return True + return False + + def WaitForSpace(self, sz): + if self.HasSpace(sz): + return + while True: + self.wait_event.clear() + self.local_tail = self.tail.value + if self.HasSpace(sz): + return + self.wait_event.wait() + + def AddToBuffer(self, obj): + d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL) + n = len(d) + nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL) + sz = n + glb_nsz + self.WaitForSpace(sz) + pos = self.local_head + self.buffer[pos : pos + len(nd)] = nd + self.buffer[pos + glb_nsz : pos + sz] = d + self.local_head += sz + + def FetchBatch(self, batch_size): + fetched = 0 + while batch_size > fetched: + obj = self.Next() + if obj is None: + self.more = False + break + self.AddToBuffer(obj) + fetched += 1 + if fetched: + self.fetched += fetched + with self.fetch_count.get_lock(): + self.fetch_count.value += fetched + self.head.value = self.local_head + self.fetched_event.set() + + def Run(self): + while self.more: + target = self.WaitForTarget() + if target < 0: + break + batch_size = min(glb_chunk_sz, target - self.fetched) + self.FetchBatch(batch_size) + self.fetching_done.value = True + self.fetched_event.set() + +def SQLFetcherFn(*x): + process = 
SQLFetcherProcess(*x)
+	process.Run()
+
+# SQL data fetcher
+
+class SQLFetcher(QObject):
+
+	done = Signal(object)
+
+	def __init__(self, glb, sql, prep, process_data, parent=None):
+		super(SQLFetcher, self).__init__(parent)
+		self.process_data = process_data
+		self.more = True
+		self.target = 0
+		self.last_target = 0
+		self.fetched = 0
+		self.buffer_size = 16 * 1024 * 1024
+		self.buffer = Array(c_char, self.buffer_size, lock=False)
+		self.head = Value(c_longlong)
+		self.tail = Value(c_longlong)
+		self.local_tail = 0
+		self.fetch_count = Value(c_longlong)
+		self.fetching_done = Value(c_bool)
+		self.last_count = 0
+		self.process_target = Value(c_longlong)
+		self.wait_event = Event()
+		self.fetched_event = Event()
+		glb.AddInstanceToShutdownOnExit(self)
+		self.process = Process(target=SQLFetcherFn, args=(glb.dbref, sql, self.buffer, self.head, self.tail, self.fetch_count, self.fetching_done, self.process_target, self.wait_event, self.fetched_event, prep))
+		self.process.start()
+		self.thread = Thread(self.Thread)
+		self.thread.done.connect(self.ProcessData, Qt.QueuedConnection)
+		self.thread.start()
+
+	def Shutdown(self):
+		# Tell the thread and process to exit
+		self.process_target.value = -1
+		self.wait_event.set()
+		self.more = False
+		self.fetching_done.value = True
+		self.fetched_event.set()
+
+	def Thread(self):
+		if not self.more:
+			return True, 0
+		while True:
+			self.fetched_event.clear()
+			fetch_count = self.fetch_count.value
+			if fetch_count != self.last_count:
+				break
+			if self.fetching_done.value:
+				self.more = False
+				return True, 0
+			self.fetched_event.wait()
+		count = fetch_count - self.last_count
+		self.last_count = fetch_count
+		self.fetched += count
+		return False, count
+
+	def Fetch(self, nr):
+		if not self.more:
+			# -1 indicates there are no more
+			return -1
+		result = self.fetched
+		extra = result + nr - self.target
+		if extra > 0:
+			self.target += extra
+			# process_target < 0 indicates shutting down
+			if self.process_target.value >= 0:
+				self.process_target.value = self.target
+			self.wait_event.set()
+		return result
+
+	def RemoveFromBuffer(self):
+		pos = self.local_tail
+		if len(self.buffer) - pos < glb_nsz:
+			pos = 0
+		n = pickle.loads(self.buffer[pos : pos + glb_nsz])
+		if n == 0:
+			pos = 0
+			n = pickle.loads(self.buffer[0 : glb_nsz])
+		pos += glb_nsz
+		obj = pickle.loads(self.buffer[pos : pos + n])
+		self.local_tail = pos + n
+		return obj
+
+	def ProcessData(self, count):
+		for i in xrange(count):
+			obj = self.RemoveFromBuffer()
+			self.process_data(obj)
+		self.tail.value = self.local_tail
+		self.wait_event.set()
+		self.done.emit(count)
+
+# Fetch more records bar
+
+class FetchMoreRecordsBar():
+
+	def __init__(self, model, parent):
+		self.model = model
+
+		self.label = QLabel("Number of records (x " + "{:,}".format(glb_chunk_sz) + ") to fetch:")
+		self.label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+		self.fetch_count = QSpinBox()
+		self.fetch_count.setRange(1, 1000000)
+		self.fetch_count.setValue(10)
+		self.fetch_count.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+		self.fetch = QPushButton("Go!")
+		self.fetch.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+		self.fetch.released.connect(self.FetchMoreRecords)
+
+		self.progress = QProgressBar()
+		self.progress.setRange(0, 100)
+		self.progress.hide()
+
+		self.done_label = QLabel("All records fetched")
+		self.done_label.hide()
+
+		self.spacer = QLabel("")
+
+		self.close_button = QToolButton()
+		self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
+		self.close_button.released.connect(self.Deactivate)
+
+		self.hbox = QHBoxLayout()
+		self.hbox.setContentsMargins(0, 0, 0, 0)
+
+		self.hbox.addWidget(self.label)
+		self.hbox.addWidget(self.fetch_count)
+		self.hbox.addWidget(self.fetch)
+		self.hbox.addWidget(self.spacer)
+		self.hbox.addWidget(self.progress)
+		self.hbox.addWidget(self.done_label)
+		self.hbox.addWidget(self.close_button)
+
+		self.bar = QWidget()
+		self.bar.setLayout(self.hbox)
+		self.bar.show()
+
+		self.in_progress = False
+		self.model.progress.connect(self.Progress)
+
+		self.done = False
+
+		if not model.HasMoreRecords():
+			self.Done()
+
+	def Widget(self):
+		return self.bar
+
+	def Activate(self):
+		self.bar.show()
+		self.fetch.setFocus()
+
+	def Deactivate(self):
+		self.bar.hide()
+
+	def Enable(self, enable):
+		self.fetch.setEnabled(enable)
+		self.fetch_count.setEnabled(enable)
+
+	def Busy(self):
+		self.Enable(False)
+		self.fetch.hide()
+		self.spacer.hide()
+		self.progress.show()
+
+	def Idle(self):
+		self.in_progress = False
+		self.Enable(True)
+		self.progress.hide()
+		self.fetch.show()
+		self.spacer.show()
+
+	def Target(self):
+		return self.fetch_count.value() * glb_chunk_sz
+
+	def Done(self):
+		self.done = True
+		self.Idle()
+		self.label.hide()
+		self.fetch_count.hide()
+		self.fetch.hide()
+		self.spacer.hide()
+		self.done_label.show()
+
+	def Progress(self, count):
+		if self.in_progress:
+			if count:
+				percent = ((count - self.start) * 100) / self.Target()
+				if percent >= 100:
+					self.Idle()
+				else:
+					self.progress.setValue(percent)
+		if not count:
+			# Count value of zero means no more records
+			self.Done()
+
+	def FetchMoreRecords(self):
+		if self.done:
+			return
+		self.progress.setValue(0)
+		self.Busy()
+		self.in_progress = True
+		self.start = self.model.FetchMoreRecords(self.Target())
+
+# Branch data model level two item
+
+class BranchLevelTwoItem():
+
+	def __init__(self, row, col, text, parent_item):
+		self.row = row
+		self.parent_item = parent_item
+		self.data = [""] * (col + 1)
+		self.data[col] = text
+		self.level = 2
+
+	def getParentItem(self):
+		return self.parent_item
+
+	def getRow(self):
+		return self.row
+
+	def childCount(self):
+		return 0
+
+	def hasChildren(self):
+		return False
+
+	def getData(self, column):
+		return self.data[column]
+
+# Branch data model level one item
+
+class BranchLevelOneItem():
+
+	def __init__(self, glb, row, data, parent_item):
+		self.glb = glb
+		self.row = row
+		self.parent_item = parent_item
+		self.child_count = 0
+		self.child_items = []
+		self.data = data[1:]
+		self.dbid = data[0]
+		self.level = 1
+		self.query_done = False
+		self.br_col = len(self.data) - 1
+
+	def getChildItem(self, row):
+		return self.child_items[row]
+
+	def getParentItem(self):
+		return self.parent_item
+
+	def getRow(self):
+		return self.row
+
+	def Select(self):
+		self.query_done = True
+
+		if not self.glb.have_disassembler:
+			return
+
+		query = QSqlQuery(self.glb.db)
+
+		QueryExec(query, "SELECT cpu, to_dso_id, to_symbol_id, to_sym_offset, short_name, long_name, build_id, sym_start, to_ip"
+				" FROM samples"
+				" INNER JOIN dsos ON samples.to_dso_id = dsos.id"
+				" INNER JOIN symbols ON samples.to_symbol_id = symbols.id"
+				" WHERE samples.id = " + str(self.dbid))
+		if not query.next():
+			return
+		cpu = query.value(0)
+		dso = query.value(1)
+		sym = query.value(2)
+		if dso == 0 or sym == 0:
+			return
+		off = query.value(3)
+		short_name = query.value(4)
+		long_name = query.value(5)
+		build_id = query.value(6)
+		sym_start = query.value(7)
+		ip = query.value(8)
+
+		QueryExec(query, "SELECT samples.dso_id, symbol_id, sym_offset, sym_start"
+				" FROM samples"
+				" INNER JOIN symbols ON samples.symbol_id = symbols.id"
+				" WHERE samples.id > " + str(self.dbid) + " AND cpu = " + str(cpu) +
+				" ORDER BY samples.id"
+				" LIMIT 1")
+		if not query.next():
+			return
+		if query.value(0) != dso:
+			# Cannot disassemble from one dso to another
+			return
+		bsym = query.value(1)
+		boff = query.value(2)
+		bsym_start = query.value(3)
+		if bsym == 0:
+			return
+		tot = bsym_start + boff + 1 - sym_start - off
+		if tot <= 0 or tot > 16384:
+			return
+
+		inst = self.glb.disassembler.Instruction()
+		f = self.glb.FileFromNamesAndBuildId(short_name, long_name, build_id)
+		if not f:
+			return
+		mode = 0 if Is64Bit(f) else 1
+		self.glb.disassembler.SetMode(inst, mode)
+
+		buf_sz = tot + 16
+		buf = create_string_buffer(tot + 16)
+		f.seek(sym_start + off)
+		buf.value = f.read(buf_sz)
+		buf_ptr = addressof(buf)
+		i = 0
+		while tot > 0:
+			cnt, text = self.glb.disassembler.DisassembleOne(inst, buf_ptr, buf_sz, ip)
+			if cnt:
+				byte_str = tohex(ip).rjust(16)
+				for k in xrange(cnt):
+					byte_str += " %02x" % ord(buf[i])
+					i += 1
+				while k < 15:
+					byte_str += "   "
+					k += 1
+				self.child_items.append(BranchLevelTwoItem(0, self.br_col, byte_str + " " + text, self))
+				self.child_count += 1
+			else:
+				return
+			buf_ptr += cnt
+			tot -= cnt
+			buf_sz -= cnt
+			ip += cnt
+
+	def childCount(self):
+		if not self.query_done:
+			self.Select()
+			if not self.child_count:
+				return -1
+		return self.child_count
+
+	def hasChildren(self):
+		if not self.query_done:
+			return True
+		return self.child_count > 0
+
+	def getData(self, column):
+		return self.data[column]
+
+# Branch data model root item
+
+class BranchRootItem():
+
+	def __init__(self):
+		self.child_count = 0
+		self.child_items = []
+		self.level = 0
+
+	def getChildItem(self, row):
+		return self.child_items[row]
+
+	def getParentItem(self):
+		return None
+
+	def getRow(self):
+		return 0
+
+	def childCount(self):
+		return self.child_count
+
+	def hasChildren(self):
+		return self.child_count > 0
+
+	def getData(self, column):
+		return ""
+
+# Calculate instructions per cycle
+
+def CalcIPC(cyc_cnt, insn_cnt):
+	if cyc_cnt and insn_cnt:
+		ipc = Decimal(float(insn_cnt) / cyc_cnt)
+		ipc = str(ipc.quantize(Decimal(".01"), rounding=ROUND_HALF_UP))
+	else:
+		ipc = "0"
+	return ipc
+
+# Branch data preparation
+
+def BranchDataPrepBr(query, data):
+	data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
+			" (" + dsoname(query.value(11)) + ")" + " -> " +
+			tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
+			" (" + dsoname(query.value(15)) + ")")
+
+def BranchDataPrepIPC(query, data):
+	insn_cnt = query.value(16)
+	cyc_cnt = query.value(17)
+	ipc = CalcIPC(cyc_cnt, insn_cnt)
+	data.append(insn_cnt)
+	data.append(cyc_cnt)
+	data.append(ipc)
+
+def BranchDataPrep(query):
+	data = []
+	for i in xrange(0, 8):
+		data.append(query.value(i))
+	BranchDataPrepBr(query, data)
+	return data
+
+def BranchDataPrepWA(query):
+	data = []
+	data.append(query.value(0))
+	# Workaround pyside failing to handle large integers (i.e.
time) in python3 by converting to a string + data.append("{:>19}".format(query.value(1))) + for i in xrange(2, 8): + data.append(query.value(i)) + BranchDataPrepBr(query, data) + return data + +def BranchDataWithIPCPrep(query): + data = [] + for i in xrange(0, 8): + data.append(query.value(i)) + BranchDataPrepIPC(query, data) + BranchDataPrepBr(query, data) + return data + +def BranchDataWithIPCPrepWA(query): + data = [] + data.append(query.value(0)) + # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string + data.append("{:>19}".format(query.value(1))) + for i in xrange(2, 8): + data.append(query.value(i)) + BranchDataPrepIPC(query, data) + BranchDataPrepBr(query, data) + return data + +# Branch data model + +class BranchModel(TreeModel): + + progress = Signal(object) + + def __init__(self, glb, event_id, where_clause, parent=None): + super(BranchModel, self).__init__(glb, None, parent) + self.event_id = event_id + self.more = True + self.populated = 0 + self.have_ipc = IsSelectable(glb.db, "samples", columns = "insn_count, cyc_count") + if self.have_ipc: + select_ipc = ", insn_count, cyc_count" + prep_fn = BranchDataWithIPCPrep + prep_wa_fn = BranchDataWithIPCPrepWA + else: + select_ipc = "" + prep_fn = BranchDataPrep + prep_wa_fn = BranchDataPrepWA + sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name," + " CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END," + " ip, symbols.name, sym_offset, dsos.short_name," + " to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name" + + select_ipc + + " FROM samples" + " INNER JOIN comms ON comm_id = comms.id" + " INNER JOIN threads ON thread_id = threads.id" + " INNER JOIN branch_types ON branch_type = branch_types.id" + " INNER JOIN symbols ON symbol_id = symbols.id" + " INNER JOIN symbols to_symbols ON to_symbol_id = to_symbols.id" + " INNER JOIN dsos ON samples.dso_id = dsos.id" + " INNER JOIN dsos AS to_dsos ON samples.to_dso_id = to_dsos.id" + " WHERE samples.id > $$last_id$$" + where_clause + + " AND evsel_id = " + str(self.event_id) + + " ORDER BY samples.id" + " LIMIT " + str(glb_chunk_sz)) + if pyside_version_1 and sys.version_info[0] == 3: + prep = prep_fn + else: + prep = prep_wa_fn + self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample) + self.fetcher.done.connect(self.Update) + self.fetcher.Fetch(glb_chunk_sz) + + def GetRoot(self): + return BranchRootItem() + + def columnCount(self, parent=None): + if self.have_ipc: + return 11 + else: + return 8 + + def columnHeader(self, column): + if self.have_ipc: + return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Insn Cnt", "Cyc Cnt", "IPC", "Branch")[column] + else: + return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column] + + def columnFont(self, column): + if self.have_ipc: + br_col = 10 + else: + br_col = 7 + if column != br_col: + return None + return QFont("Monospace") + + def DisplayData(self, item, index): + if item.level == 1: + self.FetchIfNeeded(item.row) + return item.getData(index.column()) + + def AddSample(self, data): + child = BranchLevelOneItem(self.glb, self.populated, data, self.root) + self.root.child_items.append(child) + self.populated += 1 + + def Update(self, fetched): + if not fetched: + self.more = False + self.progress.emit(0) + child_count = self.root.child_count + count = self.populated - child_count + if count > 0: + parent = QModelIndex() + self.beginInsertRows(parent, child_count, child_count + count - 1) + self.insertRows(child_count, 
count, parent)
+			self.root.child_count += count
+			self.endInsertRows()
+		self.progress.emit(self.root.child_count)
+
+	def FetchMoreRecords(self, count):
+		current = self.root.child_count
+		if self.more:
+			self.fetcher.Fetch(count)
+		else:
+			self.progress.emit(0)
+		return current
+
+	def HasMoreRecords(self):
+		return self.more
+
+# Report Variables
+
+class ReportVars():
+
+	def __init__(self, name = "", where_clause = "", limit = ""):
+		self.name = name
+		self.where_clause = where_clause
+		self.limit = limit
+
+	def UniqueId(self):
+		return str(self.where_clause + ";" + self.limit)
+
+# Branch window
+
+class BranchWindow(QMdiSubWindow):
+
+	def __init__(self, glb, event_id, report_vars, parent=None):
+		super(BranchWindow, self).__init__(parent)
+
+		model_name = "Branch Events " + str(event_id) + " " + report_vars.UniqueId()
+
+		self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, report_vars.where_clause))
+
+		self.view = QTreeView()
+		self.view.setUniformRowHeights(True)
+		self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
+		self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
+		self.view.setModel(self.model)
+
+		self.ResizeColumnsToContents()
+
+		self.context_menu = TreeContextMenu(self.view)
+
+		self.find_bar = FindBar(self, self, True)
+
+		self.finder = ChildDataItemFinder(self.model.root)
+
+		self.fetch_bar = FetchMoreRecordsBar(self.model, self)
+
+		self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
+
+		self.setWidget(self.vbox.Widget())
+
+		AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name + " Branch Events")
+
+	def ResizeColumnToContents(self, column, n):
+		# Using the view's resizeColumnToContents() here is extremely slow
+		# so implement a crude alternative
+		mm = "MM" if column else "MMMM"
+		font = self.view.font()
+		metrics = QFontMetrics(font)
+		max = 0
+		for row in xrange(n):
+			val = self.model.root.child_items[row].data[column]
+			len = metrics.width(str(val) + mm)
+			max = len if len > max else max
+		val = self.model.columnHeader(column)
+		len = metrics.width(str(val) + mm)
+		max = len if len > max else max
+		self.view.setColumnWidth(column, max)
+
+	def ResizeColumnsToContents(self):
+		n = min(self.model.root.child_count, 100)
+		if n < 1:
+			# No data yet, so connect a signal to notify when there is
+			self.model.rowsInserted.connect(self.UpdateColumnWidths)
+			return
+		columns = self.model.columnCount()
+		for i in xrange(columns):
+			self.ResizeColumnToContents(i, n)
+
+	def UpdateColumnWidths(self, *x):
+		# This only needs to be done once, so disconnect the signal now
+		self.model.rowsInserted.disconnect(self.UpdateColumnWidths)
+		self.ResizeColumnsToContents()
+
+	def Find(self, value, direction, pattern, context):
+		self.view.setFocus()
+		self.find_bar.Busy()
+		self.finder.Find(value, direction, pattern, context, self.FindDone)
+
+	def FindDone(self, row):
+		self.find_bar.Idle()
+		if row >= 0:
+			self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
+		else:
+			self.find_bar.NotFound()
+
+# Line edit data item
+
+class LineEditDataItem(object):
+
+	def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
+		self.glb = glb
+		self.label = label
+		self.placeholder_text = placeholder_text
+		self.parent = parent
+		self.id = id
+
+		self.value = default
+
+		self.widget = QLineEdit(default)
+		self.widget.editingFinished.connect(self.Validate)
+		self.widget.textChanged.connect(self.Invalidate)
+		self.red = False
+		self.error = ""
+		self.validated = True
+
+		if
placeholder_text: + self.widget.setPlaceholderText(placeholder_text) + + def TurnTextRed(self): + if not self.red: + palette = QPalette() + palette.setColor(QPalette.Text,Qt.red) + self.widget.setPalette(palette) + self.red = True + + def TurnTextNormal(self): + if self.red: + palette = QPalette() + self.widget.setPalette(palette) + self.red = False + + def InvalidValue(self, value): + self.value = "" + self.TurnTextRed() + self.error = self.label + " invalid value '" + value + "'" + self.parent.ShowMessage(self.error) + + def Invalidate(self): + self.validated = False + + def DoValidate(self, input_string): + self.value = input_string.strip() + + def Validate(self): + self.validated = True + self.error = "" + self.TurnTextNormal() + self.parent.ClearMessage() + input_string = self.widget.text() + if not len(input_string.strip()): + self.value = "" + return + self.DoValidate(input_string) + + def IsValid(self): + if not self.validated: + self.Validate() + if len(self.error): + self.parent.ShowMessage(self.error) + return False + return True + + def IsNumber(self, value): + try: + x = int(value) + except: + x = 0 + return str(x) == value + +# Non-negative integer ranges dialog data item + +class NonNegativeIntegerRangesDataItem(LineEditDataItem): + + def __init__(self, glb, label, placeholder_text, column_name, parent): + super(NonNegativeIntegerRangesDataItem, self).__init__(glb, label, placeholder_text, parent) + + self.column_name = column_name + + def DoValidate(self, input_string): + singles = [] + ranges = [] + for value in [x.strip() for x in input_string.split(",")]: + if "-" in value: + vrange = value.split("-") + if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]): + return self.InvalidValue(value) + ranges.append(vrange) + else: + if not self.IsNumber(value): + return self.InvalidValue(value) + singles.append(value) + ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges] + if len(singles): + ranges.append(self.column_name + " IN (" + ",".join(singles) + ")") + self.value = " OR ".join(ranges) + +# Positive integer dialog data item + +class PositiveIntegerDataItem(LineEditDataItem): + + def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""): + super(PositiveIntegerDataItem, self).__init__(glb, label, placeholder_text, parent, id, default) + + def DoValidate(self, input_string): + if not self.IsNumber(input_string.strip()): + return self.InvalidValue(input_string) + value = int(input_string.strip()) + if value <= 0: + return self.InvalidValue(input_string) + self.value = str(value) + +# Dialog data item converted and validated using a SQL table + +class SQLTableDataItem(LineEditDataItem): + + def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent): + super(SQLTableDataItem, self).__init__(glb, label, placeholder_text, parent) + + self.table_name = table_name + self.match_column = match_column + self.column_name1 = column_name1 + self.column_name2 = column_name2 + + def ValueToIds(self, value): + ids = [] + query = QSqlQuery(self.glb.db) + stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'" + ret = query.exec_(stmt) + if ret: + while query.next(): + ids.append(str(query.value(0))) + return ids + + def DoValidate(self, input_string): + all_ids = [] + for value in [x.strip() for x in input_string.split(",")]: + ids = self.ValueToIds(value) + if len(ids): + 
all_ids.extend(ids) + else: + return self.InvalidValue(value) + self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")" + if self.column_name2: + self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )" + +# Sample time ranges dialog data item converted and validated using 'samples' SQL table + +class SampleTimeRangesDataItem(LineEditDataItem): + + def __init__(self, glb, label, placeholder_text, column_name, parent): + self.column_name = column_name + + self.last_id = 0 + self.first_time = 0 + self.last_time = 2 ** 64 + + query = QSqlQuery(glb.db) + QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1") + if query.next(): + self.last_id = int(query.value(0)) + self.first_time = int(glb.HostStartTime()) + self.last_time = int(glb.HostFinishTime()) + if placeholder_text: + placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time) + + super(SampleTimeRangesDataItem, self).__init__(glb, label, placeholder_text, parent) + + def IdBetween(self, query, lower_id, higher_id, order): + QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1") + if query.next(): + return True, int(query.value(0)) + else: + return False, 0 + + def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor): + query = QSqlQuery(self.glb.db) + while True: + next_id = int((lower_id + higher_id) / 2) + QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id)) + if not query.next(): + ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC") + if not ok: + ok, dbid = self.IdBetween(query, next_id, higher_id, "") + if not ok: + return str(higher_id) + next_id = dbid + QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id)) + next_time = int(query.value(0)) + if get_floor: + if target_time > next_time: + lower_id = next_id + else: + higher_id = next_id + if higher_id <= lower_id + 1: + return str(higher_id) + else: + if target_time >= next_time: + lower_id = next_id + else: + higher_id = next_id + if higher_id <= lower_id + 1: + return str(lower_id) + + def ConvertRelativeTime(self, val): + mult = 1 + suffix = val[-2:] + if suffix == "ms": + mult = 1000000 + elif suffix == "us": + mult = 1000 + elif suffix == "ns": + mult = 1 + else: + return val + val = val[:-2].strip() + if not self.IsNumber(val): + return val + val = int(val) * mult + if val >= 0: + val += self.first_time + else: + val += self.last_time + return str(val) + + def ConvertTimeRange(self, vrange): + if vrange[0] == "": + vrange[0] = str(self.first_time) + if vrange[1] == "": + vrange[1] = str(self.last_time) + vrange[0] = self.ConvertRelativeTime(vrange[0]) + vrange[1] = self.ConvertRelativeTime(vrange[1]) + if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]): + return False + beg_range = max(int(vrange[0]), self.first_time) + end_range = min(int(vrange[1]), self.last_time) + if beg_range > self.last_time or end_range < self.first_time: + return False + vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True) + vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False) + return True + + def AddTimeRange(self, value, ranges): + n = value.count("-") + if n == 1: + pass + elif n == 2: + if value.split("-")[1].strip() == "": + n = 1 + elif n == 3: + n = 2 + else: + return False + pos = findnth(value, "-", n) + vrange = [value[:pos].strip() ,value[pos+1:].strip()] + if self.ConvertTimeRange(vrange): + 
ranges.append(vrange) + return True + return False + + def DoValidate(self, input_string): + ranges = [] + for value in [x.strip() for x in input_string.split(",")]: + if not self.AddTimeRange(value, ranges): + return self.InvalidValue(value) + ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges] + self.value = " OR ".join(ranges) + +# Report Dialog Base + +class ReportDialogBase(QDialog): + + def __init__(self, glb, title, items, partial, parent=None): + super(ReportDialogBase, self).__init__(parent) + + self.glb = glb + + self.report_vars = ReportVars() + + self.setWindowTitle(title) + self.setMinimumWidth(600) + + self.data_items = [x(glb, self) for x in items] + + self.partial = partial + + self.grid = QGridLayout() + + for row in xrange(len(self.data_items)): + self.grid.addWidget(QLabel(self.data_items[row].label), row, 0) + self.grid.addWidget(self.data_items[row].widget, row, 1) + + self.status = QLabel() + + self.ok_button = QPushButton("Ok", self) + self.ok_button.setDefault(True) + self.ok_button.released.connect(self.Ok) + self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) + + self.cancel_button = QPushButton("Cancel", self) + self.cancel_button.released.connect(self.reject) + self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) + + self.hbox = QHBoxLayout() + #self.hbox.addStretch() + self.hbox.addWidget(self.status) + self.hbox.addWidget(self.ok_button) + self.hbox.addWidget(self.cancel_button) + + self.vbox = QVBoxLayout() + self.vbox.addLayout(self.grid) + self.vbox.addLayout(self.hbox) + + self.setLayout(self.vbox) + + def Ok(self): + vars = self.report_vars + for d in self.data_items: + if d.id == "REPORTNAME": + vars.name = d.value + if not vars.name: + self.ShowMessage("Report name is required") + return + for d in self.data_items: + if not d.IsValid(): + return + for d in self.data_items[1:]: + if d.id == "LIMIT": + vars.limit = d.value + elif len(d.value): + if len(vars.where_clause): + vars.where_clause += " AND " + vars.where_clause += d.value + if len(vars.where_clause): + if self.partial: + vars.where_clause = " AND ( " + vars.where_clause + " ) " + else: + vars.where_clause = " WHERE " + vars.where_clause + " " + self.accept() + + def ShowMessage(self, msg): + self.status.setText("" + msg) + + def ClearMessage(self): + self.status.setText("") + +# Selected branch report creation dialog + +class SelectedBranchDialog(ReportDialogBase): + + def __init__(self, glb, parent=None): + title = "Selected Branches" + items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"), + lambda g, p: SampleTimeRangesDataItem(g, "Time ranges:", "Enter time ranges", "samples.id", p), + lambda g, p: NonNegativeIntegerRangesDataItem(g, "CPUs:", "Enter CPUs or ranges e.g. 
0,5-6", "cpu", p), + lambda g, p: SQLTableDataItem(g, "Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", "", p), + lambda g, p: SQLTableDataItem(g, "PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", "", p), + lambda g, p: SQLTableDataItem(g, "TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", "", p), + lambda g, p: SQLTableDataItem(g, "DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id", p), + lambda g, p: SQLTableDataItem(g, "Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id", p), + lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p)) + super(SelectedBranchDialog, self).__init__(glb, title, items, True, parent) + +# Event list + +def GetEventList(db): + events = [] + query = QSqlQuery(db) + QueryExec(query, "SELECT name FROM selected_events WHERE id > 0 ORDER BY id") + while query.next(): + events.append(query.value(0)) + return events + +# Is a table selectable + +def IsSelectable(db, table, sql = "", columns = "*"): + query = QSqlQuery(db) + try: + QueryExec(query, "SELECT " + columns + " FROM " + table + " " + sql + " LIMIT 1") + except: + return False + return True + +# SQL table data model item + +class SQLTableItem(): + + def __init__(self, row, data): + self.row = row + self.data = data + + def getData(self, column): + return self.data[column] + +# SQL table data model + +class SQLTableModel(TableModel): + + progress = Signal(object) + + def __init__(self, glb, sql, column_headers, parent=None): + super(SQLTableModel, self).__init__(parent) + self.glb = glb + self.more = True + self.populated = 0 + self.column_headers = column_headers + self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample) + self.fetcher.done.connect(self.Update) + self.fetcher.Fetch(glb_chunk_sz) + + def DisplayData(self, item, index): + self.FetchIfNeeded(item.row) + return item.getData(index.column()) + + def AddSample(self, data): + child = SQLTableItem(self.populated, data) + self.child_items.append(child) + self.populated += 1 + + def Update(self, fetched): + if not fetched: + self.more = False + self.progress.emit(0) + child_count = self.child_count + count = self.populated - child_count + if count > 0: + parent = QModelIndex() + self.beginInsertRows(parent, child_count, child_count + count - 1) + self.insertRows(child_count, count, parent) + self.child_count += count + self.endInsertRows() + self.progress.emit(self.child_count) + + def FetchMoreRecords(self, count): + current = self.child_count + if self.more: + self.fetcher.Fetch(count) + else: + self.progress.emit(0) + return current + + def HasMoreRecords(self): + return self.more + + def columnCount(self, parent=None): + return len(self.column_headers) + + def columnHeader(self, column): + return self.column_headers[column] + + def SQLTableDataPrep(self, query, count): + data = [] + for i in xrange(count): + data.append(query.value(i)) + return data + +# SQL automatic table data model + +class SQLAutoTableModel(SQLTableModel): + + def __init__(self, glb, table_name, parent=None): + sql = "SELECT * FROM " + table_name + " WHERE id > $$last_id$$ ORDER BY id LIMIT " + str(glb_chunk_sz) + if table_name == "comm_threads_view": + # For now, comm_threads_view has no id column + sql = "SELECT * FROM " + table_name + " 
WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz) + column_headers = [] + query = QSqlQuery(glb.db) + if glb.dbref.is_sqlite3: + QueryExec(query, "PRAGMA table_info(" + table_name + ")") + while query.next(): + column_headers.append(query.value(1)) + if table_name == "sqlite_master": + sql = "SELECT * FROM " + table_name + else: + if table_name[:19] == "information_schema.": + sql = "SELECT * FROM " + table_name + select_table_name = table_name[19:] + schema = "information_schema" + else: + select_table_name = table_name + schema = "public" + QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'") + while query.next(): + column_headers.append(query.value(0)) + if pyside_version_1 and sys.version_info[0] == 3: + if table_name == "samples_view": + self.SQLTableDataPrep = self.samples_view_DataPrep + if table_name == "samples": + self.SQLTableDataPrep = self.samples_DataPrep + super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent) + + def samples_view_DataPrep(self, query, count): + data = [] + data.append(query.value(0)) + # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string + data.append("{:>19}".format(query.value(1))) + for i in xrange(2, count): + data.append(query.value(i)) + return data + + def samples_DataPrep(self, query, count): + data = [] + for i in xrange(9): + data.append(query.value(i)) + # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string + data.append("{:>19}".format(query.value(9))) + for i in xrange(10, count): + data.append(query.value(i)) + return data + +# Base class for custom ResizeColumnsToContents + +class ResizeColumnsToContentsBase(QObject): + + def __init__(self, parent=None): + super(ResizeColumnsToContentsBase, self).__init__(parent) + + def ResizeColumnToContents(self, column, n): + # Using the view's resizeColumnToContents() here is extrememly slow + # so implement a crude alternative + font = self.view.font() + metrics = QFontMetrics(font) + max = 0 + for row in xrange(n): + val = self.data_model.child_items[row].data[column] + len = metrics.width(str(val) + "MM") + max = len if len > max else max + val = self.data_model.columnHeader(column) + len = metrics.width(str(val) + "MM") + max = len if len > max else max + self.view.setColumnWidth(column, max) + + def ResizeColumnsToContents(self): + n = min(self.data_model.child_count, 100) + if n < 1: + # No data yet, so connect a signal to notify when there is + self.data_model.rowsInserted.connect(self.UpdateColumnWidths) + return + columns = self.data_model.columnCount() + for i in xrange(columns): + self.ResizeColumnToContents(i, n) + + def UpdateColumnWidths(self, *x): + # This only needs to be done once, so disconnect the signal now + self.data_model.rowsInserted.disconnect(self.UpdateColumnWidths) + self.ResizeColumnsToContents() + +# Convert value to CSV + +def ToCSValue(val): + if '"' in val: + val = val.replace('"', '""') + if "," in val or '"' in val: + val = '"' + val + '"' + return val + +# Key to sort table model indexes by row / column, assuming fewer than 1000 columns + +glb_max_cols = 1000 + +def RowColumnKey(a): + return a.row() * glb_max_cols + a.column() + +# Copy selected table cells to clipboard + +def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False): + indexes = sorted(view.selectedIndexes(), key=RowColumnKey) + idx_cnt = len(indexes) + if not 
idx_cnt: + return + if idx_cnt == 1: + with_hdr=False + min_row = indexes[0].row() + max_row = indexes[0].row() + min_col = indexes[0].column() + max_col = indexes[0].column() + for i in indexes: + min_row = min(min_row, i.row()) + max_row = max(max_row, i.row()) + min_col = min(min_col, i.column()) + max_col = max(max_col, i.column()) + if max_col > glb_max_cols: + raise RuntimeError("glb_max_cols is too low") + max_width = [0] * (1 + max_col - min_col) + for i in indexes: + c = i.column() - min_col + max_width[c] = max(max_width[c], len(str(i.data()))) + text = "" + pad = "" + sep = "" + if with_hdr: + model = indexes[0].model() + for col in range(min_col, max_col + 1): + val = model.headerData(col, Qt.Horizontal, Qt.DisplayRole) + if as_csv: + text += sep + ToCSValue(val) + sep = "," + else: + c = col - min_col + max_width[c] = max(max_width[c], len(val)) + width = max_width[c] + align = model.headerData(col, Qt.Horizontal, Qt.TextAlignmentRole) + if align & Qt.AlignRight: + val = val.rjust(width) + text += pad + sep + val + pad = " " * (width - len(val)) + sep = " " + text += "\n" + pad = "" + sep = "" + last_row = min_row + for i in indexes: + if i.row() > last_row: + last_row = i.row() + text += "\n" + pad = "" + sep = "" + if as_csv: + text += sep + ToCSValue(str(i.data())) + sep = "," + else: + width = max_width[i.column() - min_col] + if i.data(Qt.TextAlignmentRole) & Qt.AlignRight: + val = str(i.data()).rjust(width) + else: + val = str(i.data()) + text += pad + sep + val + pad = " " * (width - len(val)) + sep = " " + QApplication.clipboard().setText(text) + +def CopyTreeCellsToClipboard(view, as_csv=False, with_hdr=False): + indexes = view.selectedIndexes() + if not len(indexes): + return + + selection = view.selectionModel() + + first = None + for i in indexes: + above = view.indexAbove(i) + if not selection.isSelected(above): + first = i + break + + if first is None: + raise RuntimeError("CopyTreeCellsToClipboard internal error") + + model = first.model() + row_cnt = 0 + col_cnt = model.columnCount(first) + max_width = [0] * col_cnt + + indent_sz = 2 + indent_str = " " * indent_sz + + expanded_mark_sz = 2 + if sys.version_info[0] == 3: + expanded_mark = "\u25BC " + not_expanded_mark = "\u25B6 " + else: + expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xBC) + " ", "utf-8") + not_expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xB6) + " ", "utf-8") + leaf_mark = " " + + if not as_csv: + pos = first + while True: + row_cnt += 1 + row = pos.row() + for c in range(col_cnt): + i = pos.sibling(row, c) + if c: + n = len(str(i.data())) + else: + n = len(str(i.data()).strip()) + n += (i.internalPointer().level - 1) * indent_sz + n += expanded_mark_sz + max_width[c] = max(max_width[c], n) + pos = view.indexBelow(pos) + if not selection.isSelected(pos): + break + + text = "" + pad = "" + sep = "" + if with_hdr: + for c in range(col_cnt): + val = model.headerData(c, Qt.Horizontal, Qt.DisplayRole).strip() + if as_csv: + text += sep + ToCSValue(val) + sep = "," + else: + max_width[c] = max(max_width[c], len(val)) + width = max_width[c] + align = model.headerData(c, Qt.Horizontal, Qt.TextAlignmentRole) + if align & Qt.AlignRight: + val = val.rjust(width) + text += pad + sep + val + pad = " " * (width - len(val)) + sep = " " + text += "\n" + pad = "" + sep = "" + + pos = first + while True: + row = pos.row() + for c in range(col_cnt): + i = pos.sibling(row, c) + val = str(i.data()) + if not c: + if model.hasChildren(i): + if view.isExpanded(i): + mark = expanded_mark + else: + mark = 
not_expanded_mark + else: + mark = leaf_mark + val = indent_str * (i.internalPointer().level - 1) + mark + val.strip() + if as_csv: + text += sep + ToCSValue(val) + sep = "," + else: + width = max_width[c] + if c and i.data(Qt.TextAlignmentRole) & Qt.AlignRight: + val = val.rjust(width) + text += pad + sep + val + pad = " " * (width - len(val)) + sep = " " + pos = view.indexBelow(pos) + if not selection.isSelected(pos): + break + text = text.rstrip() + "\n" + pad = "" + sep = "" + + QApplication.clipboard().setText(text) + +def CopyCellsToClipboard(view, as_csv=False, with_hdr=False): + view.CopyCellsToClipboard(view, as_csv, with_hdr) + +def CopyCellsToClipboardHdr(view): + CopyCellsToClipboard(view, False, True) + +def CopyCellsToClipboardCSV(view): + CopyCellsToClipboard(view, True, True) + +# Context menu + +class ContextMenu(object): + + def __init__(self, view): + self.view = view + self.view.setContextMenuPolicy(Qt.CustomContextMenu) + self.view.customContextMenuRequested.connect(self.ShowContextMenu) + + def ShowContextMenu(self, pos): + menu = QMenu(self.view) + self.AddActions(menu) + menu.exec_(self.view.mapToGlobal(pos)) + + def AddCopy(self, menu): + menu.addAction(CreateAction("&Copy selection", "Copy to clipboard", lambda: CopyCellsToClipboardHdr(self.view), self.view)) + menu.addAction(CreateAction("Copy selection as CS&V", "Copy to clipboard as CSV", lambda: CopyCellsToClipboardCSV(self.view), self.view)) + + def AddActions(self, menu): + self.AddCopy(menu) + +class TreeContextMenu(ContextMenu): + + def __init__(self, view): + super(TreeContextMenu, self).__init__(view) + + def AddActions(self, menu): + i = self.view.currentIndex() + text = str(i.data()).strip() + if len(text): + menu.addAction(CreateAction('Copy "' + text + '"', "Copy to clipboard", lambda: QApplication.clipboard().setText(text), self.view)) + self.AddCopy(menu) + +# Table window + +class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase): + + def __init__(self, glb, table_name, parent=None): + super(TableWindow, self).__init__(parent) + + self.data_model = LookupCreateModel(table_name + " Table", lambda: SQLAutoTableModel(glb, table_name)) + + self.model = QSortFilterProxyModel() + self.model.setSourceModel(self.data_model) + + self.view = QTableView() + self.view.setModel(self.model) + self.view.setEditTriggers(QAbstractItemView.NoEditTriggers) + self.view.verticalHeader().setVisible(False) + self.view.sortByColumn(-1, Qt.AscendingOrder) + self.view.setSortingEnabled(True) + self.view.setSelectionMode(QAbstractItemView.ContiguousSelection) + self.view.CopyCellsToClipboard = CopyTableCellsToClipboard + + self.ResizeColumnsToContents() + + self.context_menu = ContextMenu(self.view) + + self.find_bar = FindBar(self, self, True) + + self.finder = ChildDataItemFinder(self.data_model) + + self.fetch_bar = FetchMoreRecordsBar(self.data_model, self) + + self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget()) + + self.setWidget(self.vbox.Widget()) + + AddSubWindow(glb.mainwindow.mdi_area, self, table_name + " Table") + + def Find(self, value, direction, pattern, context): + self.view.setFocus() + self.find_bar.Busy() + self.finder.Find(value, direction, pattern, context, self.FindDone) + + def FindDone(self, row): + self.find_bar.Idle() + if row >= 0: + self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex()))) + else: + self.find_bar.NotFound() + +# Table list + +def GetTableList(glb): + tables = [] + query = QSqlQuery(glb.db) + if 
glb.dbref.is_sqlite3: + QueryExec(query, "SELECT name FROM sqlite_master WHERE type IN ( 'table' , 'view' ) ORDER BY name") + else: + QueryExec(query, "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type IN ( 'BASE TABLE' , 'VIEW' ) ORDER BY table_name") + while query.next(): + tables.append(query.value(0)) + if glb.dbref.is_sqlite3: + tables.append("sqlite_master") + else: + tables.append("information_schema.tables") + tables.append("information_schema.views") + tables.append("information_schema.columns") + return tables + +# Top Calls data model + +class TopCallsModel(SQLTableModel): + + def __init__(self, glb, report_vars, parent=None): + text = "" + if not glb.dbref.is_sqlite3: + text = "::text" + limit = "" + if len(report_vars.limit): + limit = " LIMIT " + report_vars.limit + sql = ("SELECT comm, pid, tid, name," + " CASE" + " WHEN (short_name = '[kernel.kallsyms]') THEN '[kernel]'" + text + + " ELSE short_name" + " END AS dso," + " call_time, return_time, (return_time - call_time) AS elapsed_time, branch_count, " + " CASE" + " WHEN (calls.flags = 1) THEN 'no call'" + text + + " WHEN (calls.flags = 2) THEN 'no return'" + text + + " WHEN (calls.flags = 3) THEN 'no call/return'" + text + + " ELSE ''" + text + + " END AS flags" + " FROM calls" + " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" + " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" + " INNER JOIN dsos ON symbols.dso_id = dsos.id" + " INNER JOIN comms ON calls.comm_id = comms.id" + " INNER JOIN threads ON calls.thread_id = threads.id" + + report_vars.where_clause + + " ORDER BY elapsed_time DESC" + + limit + ) + column_headers = ("Command", "PID", "TID", "Symbol", "Object", "Call Time", "Return Time", "Elapsed Time (ns)", "Branch Count", "Flags") + self.alignment = (Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignLeft) + super(TopCallsModel, self).__init__(glb, sql, column_headers, parent) + + def columnAlignment(self, column): + return self.alignment[column] + +# Top Calls report creation dialog + +class TopCallsDialog(ReportDialogBase): + + def __init__(self, glb, parent=None): + title = "Top Calls by Elapsed Time" + items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"), + lambda g, p: SQLTableDataItem(g, "Commands:", "Only calls with these commands will be included", "comms", "comm", "comm_id", "", p), + lambda g, p: SQLTableDataItem(g, "PIDs:", "Only calls with these process IDs will be included", "threads", "pid", "thread_id", "", p), + lambda g, p: SQLTableDataItem(g, "TIDs:", "Only calls with these thread IDs will be included", "threads", "tid", "thread_id", "", p), + lambda g, p: SQLTableDataItem(g, "DSOs:", "Only calls with these DSOs will be included", "dsos", "short_name", "dso_id", "", p), + lambda g, p: SQLTableDataItem(g, "Symbols:", "Only calls with these symbols will be included", "symbols", "name", "symbol_id", "", p), + lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p), + lambda g, p: PositiveIntegerDataItem(g, "Record limit:", "Limit selection to this number of records", p, "LIMIT", "100")) + super(TopCallsDialog, self).__init__(glb, title, items, False, parent) + +# Top Calls window + +class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase): + + def __init__(self, glb, report_vars, parent=None): + super(TopCallsWindow, 
self).__init__(parent) + + self.data_model = LookupCreateModel("Top Calls " + report_vars.UniqueId(), lambda: TopCallsModel(glb, report_vars)) + self.model = self.data_model + + self.view = QTableView() + self.view.setModel(self.model) + self.view.setEditTriggers(QAbstractItemView.NoEditTriggers) + self.view.verticalHeader().setVisible(False) + self.view.setSelectionMode(QAbstractItemView.ContiguousSelection) + self.view.CopyCellsToClipboard = CopyTableCellsToClipboard + + self.context_menu = ContextMenu(self.view) + + self.ResizeColumnsToContents() + + self.find_bar = FindBar(self, self, True) + + self.finder = ChildDataItemFinder(self.model) + + self.fetch_bar = FetchMoreRecordsBar(self.data_model, self) + + self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget()) + + self.setWidget(self.vbox.Widget()) + + AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name) + + def Find(self, value, direction, pattern, context): + self.view.setFocus() + self.find_bar.Busy() + self.finder.Find(value, direction, pattern, context, self.FindDone) + + def FindDone(self, row): + self.find_bar.Idle() + if row >= 0: + self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex())) + else: + self.find_bar.NotFound() + +# Action Definition + +def CreateAction(label, tip, callback, parent=None, shortcut=None): + action = QAction(label, parent) + if shortcut != None: + action.setShortcuts(shortcut) + action.setStatusTip(tip) + action.triggered.connect(callback) + return action + +# Typical application actions + +def CreateExitAction(app, parent=None): + return CreateAction("&Quit", "Exit the application", app.closeAllWindows, parent, QKeySequence.Quit) + +# Typical MDI actions + +def CreateCloseActiveWindowAction(mdi_area): + return CreateAction("Cl&ose", "Close the active window", mdi_area.closeActiveSubWindow, mdi_area) + +def CreateCloseAllWindowsAction(mdi_area): + return CreateAction("Close &All", "Close all the windows", mdi_area.closeAllSubWindows, mdi_area) + +def CreateTileWindowsAction(mdi_area): + return CreateAction("&Tile", "Tile the windows", mdi_area.tileSubWindows, mdi_area) + +def CreateCascadeWindowsAction(mdi_area): + return CreateAction("&Cascade", "Cascade the windows", mdi_area.cascadeSubWindows, mdi_area) + +def CreateNextWindowAction(mdi_area): + return CreateAction("Ne&xt", "Move the focus to the next window", mdi_area.activateNextSubWindow, mdi_area, QKeySequence.NextChild) + +def CreatePreviousWindowAction(mdi_area): + return CreateAction("Pre&vious", "Move the focus to the previous window", mdi_area.activatePreviousSubWindow, mdi_area, QKeySequence.PreviousChild) + +# Typical MDI window menu + +class WindowMenu(): + + def __init__(self, mdi_area, menu): + self.mdi_area = mdi_area + self.window_menu = menu.addMenu("&Windows") + self.close_active_window = CreateCloseActiveWindowAction(mdi_area) + self.close_all_windows = CreateCloseAllWindowsAction(mdi_area) + self.tile_windows = CreateTileWindowsAction(mdi_area) + self.cascade_windows = CreateCascadeWindowsAction(mdi_area) + self.next_window = CreateNextWindowAction(mdi_area) + self.previous_window = CreatePreviousWindowAction(mdi_area) + self.window_menu.aboutToShow.connect(self.Update) + + def Update(self): + self.window_menu.clear() + sub_window_count = len(self.mdi_area.subWindowList()) + have_sub_windows = sub_window_count != 0 + self.close_active_window.setEnabled(have_sub_windows) + self.close_all_windows.setEnabled(have_sub_windows) + self.tile_windows.setEnabled(have_sub_windows) + 
self.cascade_windows.setEnabled(have_sub_windows) + self.next_window.setEnabled(have_sub_windows) + self.previous_window.setEnabled(have_sub_windows) + self.window_menu.addAction(self.close_active_window) + self.window_menu.addAction(self.close_all_windows) + self.window_menu.addSeparator() + self.window_menu.addAction(self.tile_windows) + self.window_menu.addAction(self.cascade_windows) + self.window_menu.addSeparator() + self.window_menu.addAction(self.next_window) + self.window_menu.addAction(self.previous_window) + if sub_window_count == 0: + return + self.window_menu.addSeparator() + nr = 1 + for sub_window in self.mdi_area.subWindowList(): + label = str(nr) + " " + sub_window.name + if nr < 10: + label = "&" + label + action = self.window_menu.addAction(label) + action.setCheckable(True) + action.setChecked(sub_window == self.mdi_area.activeSubWindow()) + action.triggered.connect(lambda a=None,x=nr: self.setActiveSubWindow(x)) + self.window_menu.addAction(action) + nr += 1 + + def setActiveSubWindow(self, nr): + self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1]) + +# Help text + +glb_help_text = """ +

+Contents
+
+1. Reports
+  1.1 Context-Sensitive Call Graph
+  1.2 Call Tree
+  1.3 All branches
+  1.4 Selected branches
+  1.5 Top calls by elapsed time
+2. Charts
+  2.1 Time chart by CPU
+3. Tables
+
+1. Reports
+
+1.1 Context-Sensitive Call Graph
+
+The result is a GUI window with a tree representing a context-sensitive
+call-graph. Expanding a couple of levels of the tree and adjusting column
+widths to suit will display something like:
+
+                                         Call Graph: pt_example
+Call Path                          Object      Count   Time(ns)  Time(%)  Branch Count   Branch Count(%)
+v- ls
+    v- 2638:2638
+        v- _start                  ld-2.19.so    1     10074071   100.0         211135            100.0
+          |- unknown               unknown       1        13198     0.1              1              0.0
+          >- _dl_start             ld-2.19.so    1      1400980    13.9          19637              9.3
+          >- _d_linit_internal     ld-2.19.so    1       448152     4.4          11094              5.3
+          v-__libc_start_main@plt  ls            1      8211741    81.5         180397             85.4
+             >- _dl_fixup          ld-2.19.so    1         7607     0.1            108              0.1
+             >- __cxa_atexit       libc-2.19.so  1        11737     0.1             10              0.0
+             >- __libc_csu_init    ls            1        10354     0.1             10              0.0
+             |- _setjmp            libc-2.19.so  1            0     0.0              4              0.0
+             v- main               ls            1      8182043    99.6         180254             99.9
+
+

+Points to note:
+
+  • The top level is a command name (comm)
+  • The next level is a thread (pid:tid)
+  • Subsequent levels are functions
+  • 'Count' is the number of calls
+  • 'Time' is the elapsed time until the function returns
+  • Percentages are relative to the level above
+  • 'Branch Count' is the total number of branches for that function and all functions that it calls
+
+Find
+
+Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
+The pattern matching symbols are ? for any character and * for zero or more characters.
+
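+For example, using symbols from the call graph shown above (illustrative only): the pattern _dl_*
+matches _dl_start and _dl_fixup, and _setjm? matches _setjmp.
+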

+1.2 Call Tree
+
+The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
+Also the 'Count' column, which would always be 1, is replaced by 'Call Time'.
+

+1.3 All branches
+
+The All branches report displays all branches in chronological order.
+Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
+

+Disassembly
+
+Open a branch to display disassembly. This only works if:
+
+  1. The disassembler is available. Currently, only Intel XED is supported - see Intel XED Setup.
+  2. The object code is available. Currently, only the perf build ID cache is searched for object code.
+     The default directory ~/.debug can be overridden by setting environment variable PERF_BUILDID_DIR.
+     One exception is kcore where the DSO long name is used (refer dsos_view on the Tables menu),
+     or alternatively, set environment variable PERF_KCORE to the kcore file name.
+
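+For example, to point the viewer at object code outside the defaults (the paths here are hypothetical):
+
+	export PERF_BUILDID_DIR=/path/to/debug
+	export PERF_KCORE=/path/to/kcore
+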

+Intel XED Setup
+
+To use Intel XED, libxed.so must be present. To build and install libxed.so:
+
+git clone https://github.com/intelxed/mbuild.git mbuild
+git clone https://github.com/intelxed/xed
+cd xed
+./mfile.py --share
+sudo ./mfile.py --prefix=/usr/local install
+sudo ldconfig
+
+
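+As a quick sanity check (a minimal sketch based on the LibXED wrapper included in this patch, which
+loads libxed.so via ctypes), the library should then load without error:
+
+	python -c "from ctypes import CDLL; CDLL('libxed.so')"
+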

+Instructions per Cycle (IPC)
+
+If available, IPC information is displayed in columns 'insn_cnt', 'cyc_cnt' and 'IPC'.
+
+Intel PT note: The information applies to the blocks of code ending with, and including, that branch.
+Due to the granularity of timing information, the number of cycles for some code blocks will not be known.
+In that case, 'insn_cnt', 'cyc_cnt' and 'IPC' are zero, but when 'IPC' is displayed it covers the period
+since the previous displayed 'IPC'.
+
+Find
+
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+

+1.4 Selected branches
+
+This is the same as the All branches report but with the data reduced
+by various selection criteria. A dialog box displays available criteria which are AND'ed together.
+
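+For example (illustrative, based on the CPU range validation code in this patch), entering CPUs 0,5-6
+produces the clause (cpu >= 5 AND cpu <= 6) OR cpu IN (0), which is then AND'ed with the other
+criteria.
+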

+1.4.1 Time ranges
+
+The time ranges hint text shows the total time range. Relative time ranges can also be entered in
+ms, us or ns. Also, negative values are relative to the end of trace. Examples:
+
+	81073085947329-81073085958238	From 81073085947329 to 81073085958238
+	100us-200us		From 100us to 200us
+	10ms-			From 10ms to the end
+	-100ns			The first 100ns
+	-10ms-			The last 10ms
+
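+Relative values are converted to nanoseconds and offset from the start of the trace, or from the end
+of the trace when negative (see ConvertRelativeTime() above); e.g. 100us becomes the trace start
+time plus 100000 ns.
+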
+N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
+

+1.5 Top calls by elapsed time
+
+The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
+The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
+If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
+
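+For example, with the Record limit left at 100 (the default set in the Top Calls dialog in this patch),
+the report shows only the 100 calls with the greatest elapsed time.
+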

+2. Charts
+
+

+2.1 Time chart by CPU
+
+This chart displays context switch information when that data is available. Refer to context_switches_view on the Tables menu.
+

+Features
+
+  1. Mouse over to highlight the task and show the time
+  2. Drag the mouse to select a region and zoom by pushing the Zoom button
+  3. Go back and forward by pressing the arrow buttons
+  4. If call information is available, right-click to show a call tree opened to that task and time.
+     Note, the call tree may take some time to appear, and there may not be call information for the task or time selected.
+

+Important
+
+The graph can be misleading in the following respects:
+
+  1. The graph shows the first task on each CPU as running from the beginning of the time range.
+     Because tracing might start on different CPUs at different times, that is not necessarily the case.
+     Refer to context_switches_view on the Tables menu to understand what data the graph is based upon.
+  2. Similarly, the last task on each CPU can be shown as running longer than it really was.
+     Again, refer to context_switches_view on the Tables menu to understand what data the graph is based upon.
+  3. When the mouse is over a task, the highlighted task might not be visible on the legend without scrolling if the legend does not fit fully in the window.
+

+3. Tables
+
+The Tables menu shows all tables and views in the database. Most tables have an associated view
+which displays the information in a more friendly way. Not all data for large tables is fetched
+immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
+but that can be slow for large tables.
+
+There are also tables of database meta-information.
+For SQLite3 databases, the sqlite_master table is included.
+For PostgreSQL databases, information_schema.tables/views/columns are included.
+

+Find
+
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+
N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous +will go to the next/previous result in id order, instead of display order. +""" + +# Help window + +class HelpWindow(QMdiSubWindow): + + def __init__(self, glb, parent=None): + super(HelpWindow, self).__init__(parent) + + self.text = QTextBrowser() + self.text.setHtml(glb_help_text) + self.text.setReadOnly(True) + self.text.setOpenExternalLinks(True) + + self.setWidget(self.text) + + AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help") + +# Main window that only displays the help text + +class HelpOnlyWindow(QMainWindow): + + def __init__(self, parent=None): + super(HelpOnlyWindow, self).__init__(parent) + + self.setMinimumSize(200, 100) + self.resize(800, 600) + self.setWindowTitle("Exported SQL Viewer Help") + self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation)) + + self.text = QTextBrowser() + self.text.setHtml(glb_help_text) + self.text.setReadOnly(True) + self.text.setOpenExternalLinks(True) + + self.setCentralWidget(self.text) + +# PostqreSQL server version + +def PostqreSQLServerVersion(db): + query = QSqlQuery(db) + QueryExec(query, "SELECT VERSION()") + if query.next(): + v_str = query.value(0) + v_list = v_str.strip().split(" ") + if v_list[0] == "PostgreSQL" and v_list[2] == "on": + return v_list[1] + return v_str + return "Unknown" + +# SQLite version + +def SQLiteVersion(db): + query = QSqlQuery(db) + QueryExec(query, "SELECT sqlite_version()") + if query.next(): + return query.value(0) + return "Unknown" + +# About dialog + +class AboutDialog(QDialog): + + def __init__(self, glb, parent=None): + super(AboutDialog, self).__init__(parent) + + self.setWindowTitle("About Exported SQL Viewer") + self.setMinimumWidth(300) + + pyside_version = "1" if pyside_version_1 else "2" + + text = "

"
+		text += "Python version:     " + sys.version.split(" ")[0] + "\n"
+		text += "PySide version:     " + pyside_version + "\n"
+		text += "Qt version:         " + qVersion() + "\n"
+		if glb.dbref.is_sqlite3:
+			text += "SQLite version:     " + SQLiteVersion(glb.db) + "\n"
+		else:
+			text += "PostqreSQL version: " + PostqreSQLServerVersion(glb.db) + "\n"
+		text += "</pre>
" + + self.text = QTextBrowser() + self.text.setHtml(text) + self.text.setReadOnly(True) + self.text.setOpenExternalLinks(True) + + self.vbox = QVBoxLayout() + self.vbox.addWidget(self.text) + + self.setLayout(self.vbox) + +# Font resize + +def ResizeFont(widget, diff): + font = widget.font() + sz = font.pointSize() + font.setPointSize(sz + diff) + widget.setFont(font) + +def ShrinkFont(widget): + ResizeFont(widget, -1) + +def EnlargeFont(widget): + ResizeFont(widget, 1) + +# Unique name for sub-windows + +def NumberedWindowName(name, nr): + if nr > 1: + name += " <" + str(nr) + ">" + return name + +def UniqueSubWindowName(mdi_area, name): + nr = 1 + while True: + unique_name = NumberedWindowName(name, nr) + ok = True + for sub_window in mdi_area.subWindowList(): + if sub_window.name == unique_name: + ok = False + break + if ok: + return unique_name + nr += 1 + +# Add a sub-window + +def AddSubWindow(mdi_area, sub_window, name): + unique_name = UniqueSubWindowName(mdi_area, name) + sub_window.setMinimumSize(200, 100) + sub_window.resize(800, 600) + sub_window.setWindowTitle(unique_name) + sub_window.setAttribute(Qt.WA_DeleteOnClose) + sub_window.setWindowIcon(sub_window.style().standardIcon(QStyle.SP_FileIcon)) + sub_window.name = unique_name + mdi_area.addSubWindow(sub_window) + sub_window.show() + +# Main window + +class MainWindow(QMainWindow): + + def __init__(self, glb, parent=None): + super(MainWindow, self).__init__(parent) + + self.glb = glb + + self.setWindowTitle("Exported SQL Viewer: " + glb.dbname) + self.setWindowIcon(self.style().standardIcon(QStyle.SP_ComputerIcon)) + self.setMinimumSize(200, 100) + + self.mdi_area = QMdiArea() + self.mdi_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded) + self.mdi_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded) + + self.setCentralWidget(self.mdi_area) + + menu = self.menuBar() + + file_menu = menu.addMenu("&File") + file_menu.addAction(CreateExitAction(glb.app, self)) + + edit_menu = menu.addMenu("&Edit") + edit_menu.addAction(CreateAction("&Copy", "Copy to clipboard", self.CopyToClipboard, self, QKeySequence.Copy)) + edit_menu.addAction(CreateAction("Copy as CS&V", "Copy to clipboard as CSV", self.CopyToClipboardCSV, self)) + edit_menu.addAction(CreateAction("&Find...", "Find items", self.Find, self, QKeySequence.Find)) + edit_menu.addAction(CreateAction("Fetch &more records...", "Fetch more records", self.FetchMoreRecords, self, [QKeySequence(Qt.Key_F8)])) + edit_menu.addAction(CreateAction("&Shrink Font", "Make text smaller", self.ShrinkFont, self, [QKeySequence("Ctrl+-")])) + edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")])) + + reports_menu = menu.addMenu("&Reports") + if IsSelectable(glb.db, "calls"): + reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self)) + + if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"): + reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self)) + + self.EventMenu(GetEventList(glb.db), reports_menu) + + if IsSelectable(glb.db, "calls"): + reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self)) + + if IsSelectable(glb.db, "context_switches"): + charts_menu = menu.addMenu("&Charts") + charts_menu.addAction(CreateAction("&Time chart by CPU", "Create a new window 
displaying time charts by CPU", self.TimeChartByCPU, self)) + + self.TableMenu(GetTableList(glb), menu) + + self.window_menu = WindowMenu(self.mdi_area, menu) + + help_menu = menu.addMenu("&Help") + help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents)) + help_menu.addAction(CreateAction("&About Exported SQL Viewer", "About this application", self.About, self)) + + def Try(self, fn): + win = self.mdi_area.activeSubWindow() + if win: + try: + fn(win.view) + except: + pass + + def CopyToClipboard(self): + self.Try(CopyCellsToClipboardHdr) + + def CopyToClipboardCSV(self): + self.Try(CopyCellsToClipboardCSV) + + def Find(self): + win = self.mdi_area.activeSubWindow() + if win: + try: + win.find_bar.Activate() + except: + pass + + def FetchMoreRecords(self): + win = self.mdi_area.activeSubWindow() + if win: + try: + win.fetch_bar.Activate() + except: + pass + + def ShrinkFont(self): + self.Try(ShrinkFont) + + def EnlargeFont(self): + self.Try(EnlargeFont) + + def EventMenu(self, events, reports_menu): + branches_events = 0 + for event in events: + event = event.split(":")[0] + if event == "branches": + branches_events += 1 + dbid = 0 + for event in events: + dbid += 1 + event = event.split(":")[0] + if event == "branches": + label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")" + reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewBranchView(x), self)) + label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")" + reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda a=None,x=dbid: self.NewSelectedBranchView(x), self)) + + def TimeChartByCPU(self): + TimeChartByCPUWindow(self.glb, self) + + def TableMenu(self, tables, menu): + table_menu = menu.addMenu("&Tables") + for table in tables: + table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda a=None,t=table: self.NewTableView(t), self)) + + def NewCallGraph(self): + CallGraphWindow(self.glb, self) + + def NewCallTree(self): + CallTreeWindow(self.glb, self) + + def NewTopCalls(self): + dialog = TopCallsDialog(self.glb, self) + ret = dialog.exec_() + if ret: + TopCallsWindow(self.glb, dialog.report_vars, self) + + def NewBranchView(self, event_id): + BranchWindow(self.glb, event_id, ReportVars(), self) + + def NewSelectedBranchView(self, event_id): + dialog = SelectedBranchDialog(self.glb, self) + ret = dialog.exec_() + if ret: + BranchWindow(self.glb, event_id, dialog.report_vars, self) + + def NewTableView(self, table_name): + TableWindow(self.glb, table_name, self) + + def Help(self): + HelpWindow(self.glb, self) + + def About(self): + dialog = AboutDialog(self.glb, self) + dialog.exec_() + +def TryOpen(file_name): + try: + return open(file_name, "rb") + except: + return None + +def Is64Bit(f): + result = sizeof(c_void_p) + # ELF support only + pos = f.tell() + f.seek(0) + header = f.read(7) + f.seek(pos) + magic = header[0:4] + if sys.version_info[0] == 2: + eclass = ord(header[4]) + encoding = ord(header[5]) + version = ord(header[6]) + else: + eclass = header[4] + encoding = header[5] + version = header[6] + if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1: + result = True if eclass == 2 else False + return result + +# Global data + +class Glb(): + + def __init__(self, dbref, db, 
dbname): + self.dbref = dbref + self.db = db + self.dbname = dbname + self.home_dir = os.path.expanduser("~") + self.buildid_dir = os.getenv("PERF_BUILDID_DIR") + if self.buildid_dir: + self.buildid_dir += "/.build-id/" + else: + self.buildid_dir = self.home_dir + "/.debug/.build-id/" + self.app = None + self.mainwindow = None + self.instances_to_shutdown_on_exit = weakref.WeakSet() + try: + self.disassembler = LibXED() + self.have_disassembler = True + except: + self.have_disassembler = False + self.host_machine_id = 0 + self.host_start_time = 0 + self.host_finish_time = 0 + + def FileFromBuildId(self, build_id): + file_name = self.buildid_dir + build_id[0:2] + "/" + build_id[2:] + "/elf" + return TryOpen(file_name) + + def FileFromNamesAndBuildId(self, short_name, long_name, build_id): + # Assume current machine i.e. no support for virtualization + if short_name[0:7] == "[kernel" and os.path.basename(long_name) == "kcore": + file_name = os.getenv("PERF_KCORE") + f = TryOpen(file_name) if file_name else None + if f: + return f + # For now, no special handling if long_name is /proc/kcore + f = TryOpen(long_name) + if f: + return f + f = self.FileFromBuildId(build_id) + if f: + return f + return None + + def AddInstanceToShutdownOnExit(self, instance): + self.instances_to_shutdown_on_exit.add(instance) + + # Shutdown any background processes or threads + def ShutdownInstances(self): + for x in self.instances_to_shutdown_on_exit: + try: + x.Shutdown() + except: + pass + + def GetHostMachineId(self): + query = QSqlQuery(self.db) + QueryExec(query, "SELECT id FROM machines WHERE pid = -1") + if query.next(): + self.host_machine_id = query.value(0) + else: + self.host_machine_id = 0 + return self.host_machine_id + + def HostMachineId(self): + if self.host_machine_id: + return self.host_machine_id + return self.GetHostMachineId() + + def SelectValue(self, sql): + query = QSqlQuery(self.db) + try: + QueryExec(query, sql) + except: + return None + if query.next(): + return Decimal(query.value(0)) + return None + + def SwitchesMinTime(self, machine_id): + return self.SelectValue("SELECT time" + " FROM context_switches" + " WHERE time != 0 AND machine_id = " + str(machine_id) + + " ORDER BY id LIMIT 1") + + def SwitchesMaxTime(self, machine_id): + return self.SelectValue("SELECT time" + " FROM context_switches" + " WHERE time != 0 AND machine_id = " + str(machine_id) + + " ORDER BY id DESC LIMIT 1") + + def SamplesMinTime(self, machine_id): + return self.SelectValue("SELECT time" + " FROM samples" + " WHERE time != 0 AND machine_id = " + str(machine_id) + + " ORDER BY id LIMIT 1") + + def SamplesMaxTime(self, machine_id): + return self.SelectValue("SELECT time" + " FROM samples" + " WHERE time != 0 AND machine_id = " + str(machine_id) + + " ORDER BY id DESC LIMIT 1") + + def CallsMinTime(self, machine_id): + return self.SelectValue("SELECT calls.call_time" + " FROM calls" + " INNER JOIN threads ON threads.thread_id = calls.thread_id" + " WHERE calls.call_time != 0 AND threads.machine_id = " + str(machine_id) + + " ORDER BY calls.id LIMIT 1") + + def CallsMaxTime(self, machine_id): + return self.SelectValue("SELECT calls.return_time" + " FROM calls" + " INNER JOIN threads ON threads.thread_id = calls.thread_id" + " WHERE calls.return_time != 0 AND threads.machine_id = " + str(machine_id) + + " ORDER BY calls.return_time DESC LIMIT 1") + + def GetStartTime(self, machine_id): + t0 = self.SwitchesMinTime(machine_id) + t1 = self.SamplesMinTime(machine_id) + t2 = self.CallsMinTime(machine_id) + if t0 is 
None or (not(t1 is None) and t1 < t0): + t0 = t1 + if t0 is None or (not(t2 is None) and t2 < t0): + t0 = t2 + return t0 + + def GetFinishTime(self, machine_id): + t0 = self.SwitchesMaxTime(machine_id) + t1 = self.SamplesMaxTime(machine_id) + t2 = self.CallsMaxTime(machine_id) + if t0 is None or (not(t1 is None) and t1 > t0): + t0 = t1 + if t0 is None or (not(t2 is None) and t2 > t0): + t0 = t2 + return t0 + + def HostStartTime(self): + if self.host_start_time: + return self.host_start_time + self.host_start_time = self.GetStartTime(self.HostMachineId()) + return self.host_start_time + + def HostFinishTime(self): + if self.host_finish_time: + return self.host_finish_time + self.host_finish_time = self.GetFinishTime(self.HostMachineId()) + return self.host_finish_time + + def StartTime(self, machine_id): + if machine_id == self.HostMachineId(): + return self.HostStartTime() + return self.GetStartTime(machine_id) + + def FinishTime(self, machine_id): + if machine_id == self.HostMachineId(): + return self.HostFinishTime() + return self.GetFinishTime(machine_id) + +# Database reference + +class DBRef(): + + def __init__(self, is_sqlite3, dbname): + self.is_sqlite3 = is_sqlite3 + self.dbname = dbname + self.TRUE = "TRUE" + self.FALSE = "FALSE" + # SQLite prior to version 3.23 does not support TRUE and FALSE + if self.is_sqlite3: + self.TRUE = "1" + self.FALSE = "0" + + def Open(self, connection_name): + dbname = self.dbname + if self.is_sqlite3: + db = QSqlDatabase.addDatabase("QSQLITE", connection_name) + else: + db = QSqlDatabase.addDatabase("QPSQL", connection_name) + opts = dbname.split() + for opt in opts: + if "=" in opt: + opt = opt.split("=") + if opt[0] == "hostname": + db.setHostName(opt[1]) + elif opt[0] == "port": + db.setPort(int(opt[1])) + elif opt[0] == "username": + db.setUserName(opt[1]) + elif opt[0] == "password": + db.setPassword(opt[1]) + elif opt[0] == "dbname": + dbname = opt[1] + else: + dbname = opt + + db.setDatabaseName(dbname) + if not db.open(): + raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text()) + return db, dbname + +# Main + +def Main(): + usage_str = "exported-sql-viewer.py [--pyside-version-1] \n" \ + " or: exported-sql-viewer.py --help-only" + ap = argparse.ArgumentParser(usage = usage_str, add_help = False) + ap.add_argument("--pyside-version-1", action='store_true') + ap.add_argument("dbname", nargs="?") + ap.add_argument("--help-only", action='store_true') + args = ap.parse_args() + + if args.help_only: + app = QApplication(sys.argv) + mainwindow = HelpOnlyWindow() + mainwindow.show() + err = app.exec_() + sys.exit(err) + + dbname = args.dbname + if dbname is None: + ap.print_usage() + print("Too few arguments") + sys.exit(1) + + is_sqlite3 = False + try: + f = open(dbname, "rb") + if f.read(15) == b'SQLite format 3': + is_sqlite3 = True + f.close() + except: + pass + + dbref = DBRef(is_sqlite3, dbname) + db, dbname = dbref.Open("main") + glb = Glb(dbref, db, dbname) + app = QApplication(sys.argv) + glb.app = app + mainwindow = MainWindow(glb) + glb.mainwindow = mainwindow + mainwindow.show() + err = app.exec_() + glb.ShutdownInstances() + db.close() + sys.exit(err) + +if __name__ == "__main__": + Main() diff --git a/data/linux-small/tools/perf/scripts/python/failed-syscalls-by-pid.py b/data/linux-small/tools/perf/scripts/python/failed-syscalls-by-pid.py new file mode 100644 index 0000000..310efe5 --- /dev/null +++ b/data/linux-small/tools/perf/scripts/python/failed-syscalls-by-pid.py @@ -0,0 +1,79 @@ +# failed system 
call counts, by pid +# (c) 2010, Tom Zanussi +# Licensed under the terms of the GNU GPL License version 2 +# +# Displays system-wide failed system call totals, broken down by pid. +# If a [comm] arg is specified, only syscalls called by [comm] are displayed. + +from __future__ import print_function + +import os +import sys + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from perf_trace_context import * +from Core import * +from Util import * + +usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n"; + +for_comm = None +for_pid = None + +if len(sys.argv) > 2: + sys.exit(usage) + +if len(sys.argv) > 1: + try: + for_pid = int(sys.argv[1]) + except: + for_comm = sys.argv[1] + +syscalls = autodict() + +def trace_begin(): + print("Press control+C to stop and show the summary") + +def trace_end(): + print_error_totals() + +def raw_syscalls__sys_exit(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, id, ret): + if (for_comm and common_comm != for_comm) or \ + (for_pid and common_pid != for_pid ): + return + + if ret < 0: + try: + syscalls[common_comm][common_pid][id][ret] += 1 + except TypeError: + syscalls[common_comm][common_pid][id][ret] = 1 + +def syscalls__sys_exit(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + id, ret): + raw_syscalls__sys_exit(**locals()) + +def print_error_totals(): + if for_comm is not None: + print("\nsyscall errors for %s:\n" % (for_comm)) + else: + print("\nsyscall errors:\n") + + print("%-30s %10s" % ("comm [pid]", "count")) + print("%-30s %10s" % ("------------------------------", "----------")) + + comm_keys = syscalls.keys() + for comm in comm_keys: + pid_keys = syscalls[comm].keys() + for pid in pid_keys: + print("\n%s [%d]" % (comm, pid)) + id_keys = syscalls[comm][pid].keys() + for id in id_keys: + print(" syscall: %-16s" % syscall_name(id)) + ret_keys = syscalls[comm][pid][id].keys() + for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True): + print(" err = %-20s %10d" % (strerror(ret), val)) diff --git a/data/linux-small/tools/perf/scripts/python/intel-pt-events.py b/data/linux-small/tools/perf/scripts/python/intel-pt-events.py new file mode 100644 index 0000000..1d3a189 --- /dev/null +++ b/data/linux-small/tools/perf/scripts/python/intel-pt-events.py @@ -0,0 +1,355 @@ +# SPDX-License-Identifier: GPL-2.0 +# intel-pt-events.py: Print Intel PT Events including Power Events and PTWRITE +# Copyright (c) 2017-2021, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. 
+ +from __future__ import print_function + +import os +import sys +import struct +import argparse + +from libxed import LibXED +from ctypes import create_string_buffer, addressof + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from perf_trace_context import perf_set_itrace_options, \ + perf_sample_insn, perf_sample_srccode + +try: + broken_pipe_exception = BrokenPipeError +except: + broken_pipe_exception = IOError + +glb_switch_str = None +glb_switch_printed = True +glb_insn = False +glb_disassembler = None +glb_src = False +glb_source_file_name = None +glb_line_number = None +glb_dso = None + +def get_optional_null(perf_dict, field): + if field in perf_dict: + return perf_dict[field] + return "" + +def get_optional_zero(perf_dict, field): + if field in perf_dict: + return perf_dict[field] + return 0 + +def get_optional_bytes(perf_dict, field): + if field in perf_dict: + return perf_dict[field] + return bytes() + +def get_optional(perf_dict, field): + if field in perf_dict: + return perf_dict[field] + return "[unknown]" + +def get_offset(perf_dict, field): + if field in perf_dict: + return "+%#x" % perf_dict[field] + return "" + +def trace_begin(): + ap = argparse.ArgumentParser(usage = "", add_help = False) + ap.add_argument("--insn-trace", action='store_true') + ap.add_argument("--src-trace", action='store_true') + global glb_args + global glb_insn + global glb_src + glb_args = ap.parse_args() + if glb_args.insn_trace: + print("Intel PT Instruction Trace") + itrace = "i0nsepwx" + glb_insn = True + elif glb_args.src_trace: + print("Intel PT Source Trace") + itrace = "i0nsepwx" + glb_insn = True + glb_src = True + else: + print("Intel PT Branch Trace, Power Events and PTWRITE") + itrace = "bepwx" + global glb_disassembler + try: + glb_disassembler = LibXED() + except: + glb_disassembler = None + perf_set_itrace_options(perf_script_context, itrace) + +def trace_end(): + print("End") + +def trace_unhandled(event_name, context, event_fields_dict): + print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])) + +def print_ptwrite(raw_buf): + data = struct.unpack_from("> 32) & 0x3 + print("hints: %#x extensions: %#x" % (hints, extensions), end=' ') + +def print_pwre(raw_buf): + data = struct.unpack_from("> 7) & 1 + cstate = (payload >> 12) & 0xf + subcstate = (payload >> 8) & 0xf + print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate), + end=' ') + +def print_exstop(raw_buf): + data = struct.unpack_from("> 4) & 0xf + wake_reason = (payload >> 8) & 0xf + print("deepest cstate: %u last cstate: %u wake reason: %#x" % + (deepest_cstate, last_cstate, wake_reason), end=' ') + +def print_psb(raw_buf): + data = struct.unpack_from("= 3: + for k in range(cnt): + byte_str += " %02x" % insn[k] + else: + for k in xrange(cnt): + byte_str += " %02x" % ord(insn[k]) + print("%-40s %-30s" % (byte_str, text), end=' ') + print("%s%s (%s)" % (symbol, offs, dso), end=' ') + else: + print("%16x %s%s (%s)" % (ip, symbol, offs, dso), end=' ') + if "addr_correlates_sym" in sample: + addr = sample["addr"] + dso = get_optional(sample, "addr_dso") + symbol = get_optional(sample, "addr_symbol") + offs = get_offset(sample, "addr_symoff") + print("=> %x %s%s (%s)%s" % (addr, symbol, offs, dso, ipc_str)) + else: + print(ipc_str) + +def print_srccode(comm, param_dict, sample, symbol, dso, with_insn): + ip = sample["ip"] + if symbol == "[unknown]": + start_str = common_start_str(comm, sample) + ("%x" % ip).rjust(16).ljust(40) + else: + 
offs = get_offset(param_dict, "symoff") + start_str = common_start_str(comm, sample) + (symbol + offs).ljust(40) + + if with_insn and glb_insn and glb_disassembler is not None: + insn = perf_sample_insn(perf_script_context) + if insn and len(insn): + cnt, text = disassem(insn, ip) + start_str += text.ljust(30) + + global glb_source_file_name + global glb_line_number + global glb_dso + + source_file_name, line_number, source_line = perf_sample_srccode(perf_script_context) + if source_file_name: + if glb_line_number == line_number and glb_source_file_name == source_file_name: + src_str = "" + else: + if len(source_file_name) > 40: + src_file = ("..." + source_file_name[-37:]) + " " + else: + src_file = source_file_name.ljust(41) + if source_line is None: + src_str = src_file + str(line_number).rjust(4) + " " + else: + src_str = src_file + str(line_number).rjust(4) + " " + source_line + glb_dso = None + elif dso == glb_dso: + src_str = "" + else: + src_str = dso + glb_dso = dso + + glb_line_number = line_number + glb_source_file_name = source_file_name + + print(start_str, src_str) + +def do_process_event(param_dict): + global glb_switch_printed + if not glb_switch_printed: + print(glb_switch_str) + glb_switch_printed = True + event_attr = param_dict["attr"] + sample = param_dict["sample"] + raw_buf = param_dict["raw_buf"] + comm = param_dict["comm"] + name = param_dict["ev_name"] + # Unused fields: + # callchain = param_dict["callchain"] + # brstack = param_dict["brstack"] + # brstacksym = param_dict["brstacksym"] + + # Symbol and dso info are not always resolved + dso = get_optional(param_dict, "dso") + symbol = get_optional(param_dict, "symbol") + + if name[0:12] == "instructions": + if glb_src: + print_srccode(comm, param_dict, sample, symbol, dso, True) + else: + print_instructions_start(comm, sample) + print_common_ip(param_dict, sample, symbol, dso) + elif name[0:8] == "branches": + if glb_src: + print_srccode(comm, param_dict, sample, symbol, dso, False) + else: + print_common_start(comm, sample, name) + print_common_ip(param_dict, sample, symbol, dso) + elif name == "ptwrite": + print_common_start(comm, sample, name) + print_ptwrite(raw_buf) + print_common_ip(param_dict, sample, symbol, dso) + elif name == "cbr": + print_common_start(comm, sample, name) + print_cbr(raw_buf) + print_common_ip(param_dict, sample, symbol, dso) + elif name == "mwait": + print_common_start(comm, sample, name) + print_mwait(raw_buf) + print_common_ip(param_dict, sample, symbol, dso) + elif name == "pwre": + print_common_start(comm, sample, name) + print_pwre(raw_buf) + print_common_ip(param_dict, sample, symbol, dso) + elif name == "exstop": + print_common_start(comm, sample, name) + print_exstop(raw_buf) + print_common_ip(param_dict, sample, symbol, dso) + elif name == "pwrx": + print_common_start(comm, sample, name) + print_pwrx(raw_buf) + print_common_ip(param_dict, sample, symbol, dso) + elif name == "psb": + print_common_start(comm, sample, name) + print_psb(raw_buf) + print_common_ip(param_dict, sample, symbol, dso) + else: + print_common_start(comm, sample, name) + print_common_ip(param_dict, sample, symbol, dso) + +def process_event(param_dict): + try: + do_process_event(param_dict) + except broken_pipe_exception: + # Stop python printing broken pipe errors and traceback + sys.stdout = open(os.devnull, 'w') + sys.exit(1) + +def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x): + try: + print("%16s %5u/%-5u [%03u] %9u.%09u error type %u code %u: %s ip 0x%16x" % + ("Trace error", 
pid, tid, cpu, ts / 1000000000, ts %1000000000, typ, code, msg, ip)) + except broken_pipe_exception: + # Stop python printing broken pipe errors and traceback + sys.stdout = open(os.devnull, 'w') + sys.exit(1) + +def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x): + global glb_switch_printed + global glb_switch_str + if out: + out_str = "Switch out " + else: + out_str = "Switch In " + if out_preempt: + preempt_str = "preempt" + else: + preempt_str = "" + if machine_pid == -1: + machine_str = "" + else: + machine_str = "machine PID %d" % machine_pid + glb_switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \ + (out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str) + glb_switch_printed = False diff --git a/data/linux-small/tools/perf/scripts/python/libxed.py b/data/linux-small/tools/perf/scripts/python/libxed.py new file mode 100644 index 0000000..2c70a5a --- /dev/null +++ b/data/linux-small/tools/perf/scripts/python/libxed.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: GPL-2.0 +# libxed.py: Python wrapper for libxed.so +# Copyright (c) 2014-2021, Intel Corporation. + +# To use Intel XED, libxed.so must be present. To build and install +# libxed.so: +# git clone https://github.com/intelxed/mbuild.git mbuild +# git clone https://github.com/intelxed/xed +# cd xed +# ./mfile.py --share +# sudo ./mfile.py --prefix=/usr/local install +# sudo ldconfig +# + +import sys + +from ctypes import CDLL, Structure, create_string_buffer, addressof, sizeof, \ + c_void_p, c_bool, c_byte, c_char, c_int, c_uint, c_longlong, c_ulonglong + +# XED Disassembler + +class xed_state_t(Structure): + + _fields_ = [ + ("mode", c_int), + ("width", c_int) + ] + +class XEDInstruction(): + + def __init__(self, libxed): + # Current xed_decoded_inst_t structure is 192 bytes. 
Use 512 to allow for future expansion + xedd_t = c_byte * 512 + self.xedd = xedd_t() + self.xedp = addressof(self.xedd) + libxed.xed_decoded_inst_zero(self.xedp) + self.state = xed_state_t() + self.statep = addressof(self.state) + # Buffer for disassembled instruction text + self.buffer = create_string_buffer(256) + self.bufferp = addressof(self.buffer) + +class LibXED(): + + def __init__(self): + try: + self.libxed = CDLL("libxed.so") + except: + self.libxed = None + if not self.libxed: + self.libxed = CDLL("/usr/local/lib/libxed.so") + + self.xed_tables_init = self.libxed.xed_tables_init + self.xed_tables_init.restype = None + self.xed_tables_init.argtypes = [] + + self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero + self.xed_decoded_inst_zero.restype = None + self.xed_decoded_inst_zero.argtypes = [ c_void_p ] + + self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode + self.xed_operand_values_set_mode.restype = None + self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ] + + self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode + self.xed_decoded_inst_zero_keep_mode.restype = None + self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ] + + self.xed_decode = self.libxed.xed_decode + self.xed_decode.restype = c_int + self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ] + + self.xed_format_context = self.libxed.xed_format_context + self.xed_format_context.restype = c_uint + self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ] + + self.xed_tables_init() + + def Instruction(self): + return XEDInstruction(self) + + def SetMode(self, inst, mode): + if mode: + inst.state.mode = 4 # 32-bit + inst.state.width = 4 # 4 bytes + else: + inst.state.mode = 1 # 64-bit + inst.state.width = 8 # 8 bytes + self.xed_operand_values_set_mode(inst.xedp, inst.statep) + + def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip): + self.xed_decoded_inst_zero_keep_mode(inst.xedp) + err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt) + if err: + return 0, "" + # Use AT&T mode (2), alternative is Intel (3) + ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0) + if not ok: + return 0, "" + if sys.version_info[0] == 2: + result = inst.buffer.value + else: + result = inst.buffer.value.decode() + # Return instruction length and the disassembled instruction text + # For now, assume the length is in byte 166 + return inst.xedd[166], result diff --git a/data/linux-small/tools/perf/scripts/python/mem-phys-addr.py b/data/linux-small/tools/perf/scripts/python/mem-phys-addr.py new file mode 100644 index 0000000..1f332e7 --- /dev/null +++ b/data/linux-small/tools/perf/scripts/python/mem-phys-addr.py @@ -0,0 +1,100 @@ +# mem-phys-addr.py: Resolve physical address samples +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (c) 2018, Intel Corporation. 
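+#
+# The script classifies each sample's phys_addr against the ranges listed
+# in /proc/iomem.  A typical line of that file looks like (values are
+# illustrative):
+#
+#   100000000-17fffffff : System RAM
+#
+# which re.split('-|:', line, 2) below breaks into the start address, the
+# end address, and the region name used for classification.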
+
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+import struct
+import re
+import bisect
+import collections
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+#physical address ranges for System RAM
+system_ram = []
+#physical address ranges for Persistent Memory
+pmem = []
+#file object for proc iomem
+f = None
+#Count for each type of memory
+load_mem_type_cnt = collections.Counter()
+#perf event name
+event_name = None
+
+def parse_iomem():
+	global f
+	f = open('/proc/iomem', 'r')
+	for i, j in enumerate(f):
+		m = re.split('-|:',j,2)
+		if m[2].strip() == 'System RAM':
+			system_ram.append(int(m[0], 16))
+			system_ram.append(int(m[1], 16))
+		if m[2].strip() == 'Persistent Memory':
+			pmem.append(int(m[0], 16))
+			pmem.append(int(m[1], 16))
+
+def print_memory_type():
+	print("Event: %s" % (event_name))
+	print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='')
+	print("%-40s %10s %10s\n" % ("----------------------------------------",
+					"-----------", "-----------"),
+					end='')
+	total = sum(load_mem_type_cnt.values())
+	for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
+					key = lambda kv: (kv[1], kv[0]), reverse = True):
+		print("%-40s %10d %10.1f%%\n" %
+			(mem_type, count, 100 * count / total),
+			end='')
+
+def trace_begin():
+	parse_iomem()
+
+def trace_end():
+	print_memory_type()
+	f.close()
+
+def is_system_ram(phys_addr):
+	#/proc/iomem is sorted
+	position = bisect.bisect(system_ram, phys_addr)
+	if position % 2 == 0:
+		return False
+	return True
+
+def is_persistent_mem(phys_addr):
+	position = bisect.bisect(pmem, phys_addr)
+	if position % 2 == 0:
+		return False
+	return True
+
+def find_memory_type(phys_addr):
+	if phys_addr == 0:
+		return "N/A"
+	if is_system_ram(phys_addr):
+		return "System RAM"
+
+	if is_persistent_mem(phys_addr):
+		return "Persistent Memory"
+
+	#slow path, search all
+	f.seek(0, 0)
+	for j in f:
+		m = re.split('-|:',j,2)
+		if int(m[0], 16) <= phys_addr <= int(m[1], 16):
+			return m[2]
+	return "N/A"
+
+def process_event(param_dict):
+	name = param_dict["ev_name"]
+	sample = param_dict["sample"]
+	phys_addr = sample["phys_addr"]
+
+	global event_name
+	if event_name is None:
+		event_name = name
+	load_mem_type_cnt[find_memory_type(phys_addr)] += 1
diff --git a/data/linux-small/tools/perf/scripts/python/net_dropmonitor.py b/data/linux-small/tools/perf/scripts/python/net_dropmonitor.py
new file mode 100755
index 0000000..1010599
--- /dev/null
+++ b/data/linux-small/tools/perf/scripts/python/net_dropmonitor.py
@@ -0,0 +1,78 @@
+# Monitor the system for dropped packets and produce a report of drop locations and counts
+# SPDX-License-Identifier: GPL-2.0
+
+from __future__ import print_function
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+		'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+drop_log = {}
+kallsyms = []
+
+def get_kallsyms_table():
+	global kallsyms
+
+	try:
+		f = open("/proc/kallsyms", "r")
+	except:
+		return
+
+	for line in f:
+		loc = int(line.split()[0], 16)
+		name = line.split()[2]
+		kallsyms.append((loc, name))
+	kallsyms.sort()
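+
+# The hand-rolled binary search in get_sym() below is equivalent to a
+# bisect lookup of the last symbol at or below loc; a sketch of the same
+# idea (hypothetical helper, not used by this script):
+#
+#   import bisect
+#   def get_sym_bisect(loc):
+#       addrs = [a for a, _ in kallsyms]
+#       i = bisect.bisect_right(addrs, loc) - 1
+#       return (kallsyms[i][1], loc - kallsyms[i][0]) if i >= 0 else (None, 0)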
+
+def get_sym(sloc):
+	loc = int(sloc)
+
+	# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
+	#            kallsyms[i][0] > loc for all end <= i < len(kallsyms)
+	start, end = -1, len(kallsyms)
+	while end != start + 1:
+		pivot = (start + end) // 2
+		if loc < kallsyms[pivot][0]:
+			end = pivot
+		else:
+			start = pivot
+
+	# Now (start == -1 or kallsyms[start][0] <= loc)
+	# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
+	if start >= 0:
+		symloc, name = kallsyms[start]
+		return (name, loc - symloc)
+	else:
+		return (None, 0)
+
+def print_drop_table():
+	print("%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT"))
+	for i in drop_log.keys():
+		(sym, off) = get_sym(i)
+		if sym is None:
+			sym = i
+		print("%25s %25s %25s" % (sym, off, drop_log[i]))
+
+
+def trace_begin():
+	print("Starting trace (Ctrl-C to dump results)")
+
+def trace_end():
+	print("Gathering kallsyms data")
+	get_kallsyms_table()
+	print_drop_table()
+
+# called from perf, when it finds a corresponding event
+def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
+		   skbaddr, location, protocol):
+	slocation = str(location)
+	try:
+		drop_log[slocation] = drop_log[slocation] + 1
+	except:
+		drop_log[slocation] = 1
diff --git a/data/linux-small/tools/perf/scripts/python/sched-migration.py b/data/linux-small/tools/perf/scripts/python/sched-migration.py
new file mode 100644
index 0000000..8196e30
--- /dev/null
+++ b/data/linux-small/tools/perf/scripts/python/sched-migration.py
@@ -0,0 +1,462 @@
+# Cpu task migration overview toy
+#
+# Copyright (C) 2010 Frederic Weisbecker
+#
+# perf script event handlers have been generated by perf script -g python
+#
+# This software is distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+from __future__ import print_function
+
+import os
+import sys
+
+from collections import defaultdict
+try:
+	from UserList import UserList
+except ImportError:
+	# Python 3: UserList moved to the collections package
+	from collections import UserList
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from SchedGui import *
+
+
+threads = { 0 : "idle"}
+
+def thread_name(pid):
+	return "%s:%d" % (threads[pid], pid)
+
+class RunqueueEventUnknown:
+	@staticmethod
+	def color():
+		return None
+
+	def __repr__(self):
+		return "unknown"
+
+class RunqueueEventSleep:
+	@staticmethod
+	def color():
+		return (0, 0, 0xff)
+
+	def __init__(self, sleeper):
+		self.sleeper = sleeper
+
+	def __repr__(self):
+		return "%s gone to sleep" % thread_name(self.sleeper)
+
+class RunqueueEventWakeup:
+	@staticmethod
+	def color():
+		return (0xff, 0xff, 0)
+
+	def __init__(self, wakee):
+		self.wakee = wakee
+
+	def __repr__(self):
+		return "%s woke up" % thread_name(self.wakee)
+
+class RunqueueEventFork:
+	@staticmethod
+	def color():
+		return (0, 0xff, 0)
+
+	def __init__(self, child):
+		self.child = child
+
+	def __repr__(self):
+		return "new forked task %s" % thread_name(self.child)
+
+class RunqueueMigrateIn:
+	@staticmethod
+	def color():
+		return (0, 0xf0, 0xff)
+
+	def __init__(self, new):
+		self.new = new
+
+	def __repr__(self):
+		return "task migrated in %s" % thread_name(self.new)
+
+class RunqueueMigrateOut:
+	@staticmethod
+	def color():
+		return (0xff, 0, 0xff)
+
+	def __init__(self, old):
+		self.old = old
+
+	def __repr__(self):
+		return "task migrated out %s" % thread_name(self.old)
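+
+# Each event class above tags a runqueue change with a color for the GUI.
+# RunqueueSnapshot below is effectively immutable: each scheduler event
+# derives a new snapshot, so every TimeSlice keeps a consistent view of
+# the runqueue.  A sketch of the intended flow (pids are made up):
+#
+#   rq = RunqueueSnapshot()        # idle task only
+#   rq = rq.wake_up(1234)          # task 1234 becomes runnable
+#   rq = rq.migrate_out(1234)      # later it migrates to another cpu
+#   rq.load()                      # runnable task count, not counting idle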
+
+class RunqueueSnapshot:
+	def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
+		self.tasks = tuple(tasks)
+		self.event = event
+
+	def sched_switch(self, prev, prev_state, next):
+		event = RunqueueEventUnknown()
+
+		if taskState(prev_state) == "R" and next in self.tasks \
+			and prev in self.tasks:
+			return self
+
+		if taskState(prev_state) != "R":
+			event = RunqueueEventSleep(prev)
+
+		next_tasks = list(self.tasks[:])
+		if prev in self.tasks:
+			if taskState(prev_state) != "R":
+				next_tasks.remove(prev)
+		elif taskState(prev_state) == "R":
+			next_tasks.append(prev)
+
+		if next not in next_tasks:
+			next_tasks.append(next)
+
+		return RunqueueSnapshot(next_tasks, event)
+
+	def migrate_out(self, old):
+		if old not in self.tasks:
+			return self
+		next_tasks = [task for task in self.tasks if task != old]
+
+		return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
+
+	def __migrate_in(self, new, event):
+		if new in self.tasks:
+			self.event = event
+			return self
+		next_tasks = self.tasks[:] + tuple([new])
+
+		return RunqueueSnapshot(next_tasks, event)
+
+	def migrate_in(self, new):
+		return self.__migrate_in(new, RunqueueMigrateIn(new))
+
+	def wake_up(self, new):
+		return self.__migrate_in(new, RunqueueEventWakeup(new))
+
+	def wake_up_new(self, new):
+		return self.__migrate_in(new, RunqueueEventFork(new))
+
+	def load(self):
+		""" Provide the number of tasks on the runqueue.
+		    Don't count idle"""
+		return len(self.tasks) - 1
+
+	def __repr__(self):
+		ret = self.tasks.__repr__()
+		ret += self.origin_tostring()
+
+		return ret
+
+class TimeSlice:
+	def __init__(self, start, prev):
+		self.start = start
+		self.prev = prev
+		self.end = start
+		# cpus that triggered the event
+		self.event_cpus = []
+		if prev is not None:
+			self.total_load = prev.total_load
+			self.rqs = prev.rqs.copy()
+		else:
+			self.rqs = defaultdict(RunqueueSnapshot)
+			self.total_load = 0
+
+	def __update_total_load(self, old_rq, new_rq):
+		diff = new_rq.load() - old_rq.load()
+		self.total_load += diff
+
+	def sched_switch(self, ts_list, prev, prev_state, next, cpu):
+		old_rq = self.prev.rqs[cpu]
+		new_rq = old_rq.sched_switch(prev, prev_state, next)
+
+		if old_rq is new_rq:
+			return
+
+		self.rqs[cpu] = new_rq
+		self.__update_total_load(old_rq, new_rq)
+		ts_list.append(self)
+		self.event_cpus = [cpu]
+
+	def migrate(self, ts_list, new, old_cpu, new_cpu):
+		if old_cpu == new_cpu:
+			return
+		old_rq = self.prev.rqs[old_cpu]
+		out_rq = old_rq.migrate_out(new)
+		self.rqs[old_cpu] = out_rq
+		self.__update_total_load(old_rq, out_rq)
+
+		new_rq = self.prev.rqs[new_cpu]
+		in_rq = new_rq.migrate_in(new)
+		self.rqs[new_cpu] = in_rq
+		self.__update_total_load(new_rq, in_rq)
+
+		ts_list.append(self)
+
+		if old_rq is not out_rq:
+			self.event_cpus.append(old_cpu)
+		self.event_cpus.append(new_cpu)
+
+	def wake_up(self, ts_list, pid, cpu, fork):
+		old_rq = self.prev.rqs[cpu]
+		if fork:
+			new_rq = old_rq.wake_up_new(pid)
+		else:
+			new_rq = old_rq.wake_up(pid)
+
+		if new_rq is old_rq:
+			return
+		self.rqs[cpu] = new_rq
+		self.__update_total_load(old_rq, new_rq)
+		ts_list.append(self)
+		self.event_cpus = [cpu]
+
+	def next(self, t):
+		self.end = t
+		return TimeSlice(t, self)
+
+class TimeSliceList(UserList):
+	def __init__(self, arg = []):
+		self.data = arg
+
+	def get_time_slice(self, ts):
+		if len(self.data) == 0:
+			slice = TimeSlice(ts, TimeSlice(-1, None))
+		else:
+			slice = self.data[-1].next(ts)
+		return slice
+
+	def find_time_slice(self, ts):
+		start = 0
+		end = len(self.data)
+		found = -1
+		searching = True
+		while searching:
+			if start == end or start == end - 1:
+				searching = False
+
+			i = (end + start) // 2
+			if self.data[i].start <= ts and self.data[i].end >= ts:
+				found = i
+				end = i
+				continue
+
+			if self.data[i].end < ts:
+ start = i + + elif self.data[i].start > ts: + end = i + + return found + + def set_root_win(self, win): + self.root_win = win + + def mouse_down(self, cpu, t): + idx = self.find_time_slice(t) + if idx == -1: + return + + ts = self[idx] + rq = ts.rqs[cpu] + raw = "CPU: %d\n" % cpu + raw += "Last event : %s\n" % rq.event.__repr__() + raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000) + raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6)) + raw += "Load = %d\n" % rq.load() + for t in rq.tasks: + raw += "%s \n" % thread_name(t) + + self.root_win.update_summary(raw) + + def update_rectangle_cpu(self, slice, cpu): + rq = slice.rqs[cpu] + + if slice.total_load != 0: + load_rate = rq.load() / float(slice.total_load) + else: + load_rate = 0 + + red_power = int(0xff - (0xff * load_rate)) + color = (0xff, red_power, red_power) + + top_color = None + + if cpu in slice.event_cpus: + top_color = rq.event.color() + + self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end) + + def fill_zone(self, start, end): + i = self.find_time_slice(start) + if i == -1: + return + + for i in range(i, len(self.data)): + timeslice = self.data[i] + if timeslice.start > end: + return + + for cpu in timeslice.rqs: + self.update_rectangle_cpu(timeslice, cpu) + + def interval(self): + if len(self.data) == 0: + return (0, 0) + + return (self.data[0].start, self.data[-1].end) + + def nr_rectangles(self): + last_ts = self.data[-1] + max_cpu = 0 + for cpu in last_ts.rqs: + if cpu > max_cpu: + max_cpu = cpu + return max_cpu + + +class SchedEventProxy: + def __init__(self): + self.current_tsk = defaultdict(lambda : -1) + self.timeslices = TimeSliceList() + + def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state, + next_comm, next_pid, next_prio): + """ Ensure the task we sched out this cpu is really the one + we logged. 
Otherwise we may have missed traces """
+
+		on_cpu_task = self.current_tsk[headers.cpu]
+
+		if on_cpu_task != -1 and on_cpu_task != prev_pid:
+			print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
+				(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid))
+
+		threads[prev_pid] = prev_comm
+		threads[next_pid] = next_comm
+		self.current_tsk[headers.cpu] = next_pid
+
+		ts = self.timeslices.get_time_slice(headers.ts())
+		ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
+
+	def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
+		ts = self.timeslices.get_time_slice(headers.ts())
+		ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
+
+	def wake_up(self, headers, comm, pid, success, target_cpu, fork):
+		if success == 0:
+			return
+		ts = self.timeslices.get_time_slice(headers.ts())
+		ts.wake_up(self.timeslices, pid, target_cpu, fork)
+
+
+def trace_begin():
+	global parser
+	parser = SchedEventProxy()
+
+def trace_end():
+	app = wx.App(False)
+	timeslices = parser.timeslices
+	frame = RootFrame(timeslices, "Migration")
+	app.MainLoop()
+
+def sched__sched_stat_runtime(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, runtime, vruntime):
+	pass
+
+def sched__sched_stat_iowait(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, delay):
+	pass
+
+def sched__sched_stat_sleep(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, delay):
+	pass
+
+def sched__sched_stat_wait(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, delay):
+	pass
+
+def sched__sched_process_fork(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, parent_comm, parent_pid, child_comm, child_pid):
+	pass
+
+def sched__sched_process_wait(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, prio):
+	pass
+
+def sched__sched_process_exit(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, prio):
+	pass
+
+def sched__sched_process_free(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, prio):
+	pass
+
+def sched__sched_migrate_task(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, prio, orig_cpu,
+	dest_cpu):
+	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+				common_pid, common_comm, common_callchain)
+	parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
+
+def sched__sched_switch(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm, common_callchain,
+	prev_comm, prev_pid, prev_prio, prev_state,
+	next_comm, next_pid, next_prio):
+
+	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+				common_pid, common_comm, common_callchain)
+	parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
+			next_comm, next_pid, next_prio)
+
+def sched__sched_wakeup_new(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, comm, pid, prio, success,
+	target_cpu):
+	headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+				common_pid, common_comm, common_callchain)
+	
parser.wake_up(headers, comm, pid, success, target_cpu, 1) + +def sched__sched_wakeup(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, comm, pid, prio, success, + target_cpu): + headers = EventHeaders(common_cpu, common_secs, common_nsecs, + common_pid, common_comm, common_callchain) + parser.wake_up(headers, comm, pid, success, target_cpu, 0) + +def sched__sched_wait_task(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, comm, pid, prio): + pass + +def sched__sched_kthread_stop_ret(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, ret): + pass + +def sched__sched_kthread_stop(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, comm, pid): + pass + +def trace_unhandled(event_name, context, event_fields_dict): + pass diff --git a/data/linux-small/tools/perf/scripts/python/sctop.py b/data/linux-small/tools/perf/scripts/python/sctop.py new file mode 100644 index 0000000..6e0278d --- /dev/null +++ b/data/linux-small/tools/perf/scripts/python/sctop.py @@ -0,0 +1,89 @@ +# system call top +# (c) 2010, Tom Zanussi +# Licensed under the terms of the GNU GPL License version 2 +# +# Periodically displays system-wide system call totals, broken down by +# syscall. If a [comm] arg is specified, only syscalls called by +# [comm] are displayed. If an [interval] arg is specified, the display +# will be refreshed every [interval] seconds. The default interval is +# 3 seconds. + +from __future__ import print_function + +import os, sys, time + +try: + import thread +except ImportError: + import _thread as thread + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from perf_trace_context import * +from Core import * +from Util import * + +usage = "perf script -s sctop.py [comm] [interval]\n"; + +for_comm = None +default_interval = 3 +interval = default_interval + +if len(sys.argv) > 3: + sys.exit(usage) + +if len(sys.argv) > 2: + for_comm = sys.argv[1] + interval = int(sys.argv[2]) +elif len(sys.argv) > 1: + try: + interval = int(sys.argv[1]) + except ValueError: + for_comm = sys.argv[1] + interval = default_interval + +syscalls = autodict() + +def trace_begin(): + thread.start_new_thread(print_syscall_totals, (interval,)) + pass + +def raw_syscalls__sys_enter(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, id, args): + if for_comm is not None: + if common_comm != for_comm: + return + try: + syscalls[id] += 1 + except TypeError: + syscalls[id] = 1 + +def syscalls__sys_enter(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + id, args): + raw_syscalls__sys_enter(**locals()) + +def print_syscall_totals(interval): + while 1: + clear_term() + if for_comm is not None: + print("\nsyscall events for %s:\n" % (for_comm)) + else: + print("\nsyscall events:\n") + + print("%-40s %10s" % ("event", "count")) + print("%-40s %10s" % + ("----------------------------------------", + "----------")) + + for id, val in sorted(syscalls.items(), + key = lambda kv: (kv[1], kv[0]), + reverse = True): + try: + print("%-40s %10d" % (syscall_name(id), val)) + except TypeError: + pass + syscalls.clear() + time.sleep(interval) diff --git a/data/linux-small/tools/perf/scripts/python/syscall-counts-by-pid.py 
b/data/linux-small/tools/perf/scripts/python/syscall-counts-by-pid.py new file mode 100644 index 0000000..f254e40 --- /dev/null +++ b/data/linux-small/tools/perf/scripts/python/syscall-counts-by-pid.py @@ -0,0 +1,75 @@ +# system call counts, by pid +# (c) 2010, Tom Zanussi +# Licensed under the terms of the GNU GPL License version 2 +# +# Displays system-wide system call totals, broken down by syscall. +# If a [comm] arg is specified, only syscalls called by [comm] are displayed. + +from __future__ import print_function + +import os, sys + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from perf_trace_context import * +from Core import * +from Util import syscall_name + +usage = "perf script -s syscall-counts-by-pid.py [comm]\n"; + +for_comm = None +for_pid = None + +if len(sys.argv) > 2: + sys.exit(usage) + +if len(sys.argv) > 1: + try: + for_pid = int(sys.argv[1]) + except: + for_comm = sys.argv[1] + +syscalls = autodict() + +def trace_begin(): + print("Press control+C to stop and show the summary") + +def trace_end(): + print_syscall_totals() + +def raw_syscalls__sys_enter(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + common_callchain, id, args): + if (for_comm and common_comm != for_comm) or \ + (for_pid and common_pid != for_pid ): + return + try: + syscalls[common_comm][common_pid][id] += 1 + except TypeError: + syscalls[common_comm][common_pid][id] = 1 + +def syscalls__sys_enter(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + id, args): + raw_syscalls__sys_enter(**locals()) + +def print_syscall_totals(): + if for_comm is not None: + print("\nsyscall events for %s:\n" % (for_comm)) + else: + print("\nsyscall events by comm/pid:\n") + + print("%-40s %10s" % ("comm [pid]/syscalls", "count")) + print("%-40s %10s" % ("----------------------------------------", + "----------")) + + comm_keys = syscalls.keys() + for comm in comm_keys: + pid_keys = syscalls[comm].keys() + for pid in pid_keys: + print("\n%s [%d]" % (comm, pid)) + id_keys = syscalls[comm][pid].keys() + for id, val in sorted(syscalls[comm][pid].items(), + key = lambda kv: (kv[1], kv[0]), reverse = True): + print(" %-38s %10d" % (syscall_name(id), val)) diff --git a/data/linux-small/tools/power/pm-graph/bootgraph.py b/data/linux-small/tools/power/pm-graph/bootgraph.py new file mode 100755 index 0000000..2823cd3 --- /dev/null +++ b/data/linux-small/tools/power/pm-graph/bootgraph.py @@ -0,0 +1,1101 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-only +# +# Tool for analyzing boot timing +# Copyright (c) 2013, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# Authors: +# Todd Brandt +# +# Description: +# This tool is designed to assist kernel and OS developers in optimizing +# their linux stack's boot time. It creates an html representation of +# the kernel boot timeline up to the start of the init process. 
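+#	 Typical invocations (run as root; the full option list is in the
+#	 printHelp text below):
+#	   sudo ./bootgraph.py                  # timeline from the current dmesg
+#	   sudo ./bootgraph.py -reboot -manual  # print kernel params for a new run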
+# + +# ----------------- LIBRARIES -------------------- + +import sys +import time +import os +import string +import re +import platform +import shutil +from datetime import datetime, timedelta +from subprocess import call, Popen, PIPE +import sleepgraph as aslib + +def pprint(msg): + print(msg) + sys.stdout.flush() + +# ----------------- CLASSES -------------------- + +# Class: SystemValues +# Description: +# A global, single-instance container used to +# store system values and test parameters +class SystemValues(aslib.SystemValues): + title = 'BootGraph' + version = '2.2' + hostname = 'localhost' + testtime = '' + kernel = '' + dmesgfile = '' + ftracefile = '' + htmlfile = 'bootgraph.html' + testdir = '' + kparams = '' + result = '' + useftrace = False + usecallgraph = False + suspendmode = 'boot' + max_graph_depth = 2 + graph_filter = 'do_one_initcall' + reboot = False + manual = False + iscronjob = False + timeformat = '%.6f' + bootloader = 'grub' + blexec = [] + def __init__(self): + self.hostname = platform.node() + self.testtime = datetime.now().strftime('%Y-%m-%d_%H:%M:%S') + if os.path.exists('/proc/version'): + fp = open('/proc/version', 'r') + val = fp.read().strip() + fp.close() + self.kernel = self.kernelVersion(val) + else: + self.kernel = 'unknown' + self.testdir = datetime.now().strftime('boot-%y%m%d-%H%M%S') + def kernelVersion(self, msg): + return msg.split()[2] + def checkFtraceKernelVersion(self): + val = tuple(map(int, self.kernel.split('-')[0].split('.'))) + if val >= (4, 10, 0): + return True + return False + def kernelParams(self): + cmdline = 'initcall_debug log_buf_len=32M' + if self.useftrace: + if self.cpucount > 0: + bs = min(self.memtotal // 2, 2*1024*1024) // self.cpucount + else: + bs = 131072 + cmdline += ' trace_buf_size=%dK trace_clock=global '\ + 'trace_options=nooverwrite,funcgraph-abstime,funcgraph-cpu,'\ + 'funcgraph-duration,funcgraph-proc,funcgraph-tail,'\ + 'nofuncgraph-overhead,context-info,graph-time '\ + 'ftrace=function_graph '\ + 'ftrace_graph_max_depth=%d '\ + 'ftrace_graph_filter=%s' % \ + (bs, self.max_graph_depth, self.graph_filter) + return cmdline + def setGraphFilter(self, val): + master = self.getBootFtraceFilterFunctions() + fs = '' + for i in val.split(','): + func = i.strip() + if func == '': + doError('badly formatted filter function string') + if '[' in func or ']' in func: + doError('loadable module functions not allowed - "%s"' % func) + if ' ' in func: + doError('spaces found in filter functions - "%s"' % func) + if func not in master: + doError('function "%s" not available for ftrace' % func) + if not fs: + fs = func + else: + fs += ','+func + if not fs: + doError('badly formatted filter function string') + self.graph_filter = fs + def getBootFtraceFilterFunctions(self): + self.rootCheck(True) + fp = open(self.tpath+'available_filter_functions') + fulllist = fp.read().split('\n') + fp.close() + list = [] + for i in fulllist: + if not i or ' ' in i or '[' in i or ']' in i: + continue + list.append(i) + return list + def myCronJob(self, line): + if '@reboot' not in line: + return False + if 'bootgraph' in line or 'analyze_boot.py' in line or '-cronjob' in line: + return True + return False + def cronjobCmdString(self): + cmdline = '%s -cronjob' % os.path.abspath(sys.argv[0]) + args = iter(sys.argv[1:]) + for arg in args: + if arg in ['-h', '-v', '-cronjob', '-reboot', '-verbose']: + continue + elif arg in ['-o', '-dmesg', '-ftrace', '-func']: + next(args) + continue + elif arg == '-result': + cmdline += ' %s "%s"' % (arg, 
os.path.abspath(next(args))) + continue + elif arg == '-cgskip': + file = self.configFile(next(args)) + cmdline += ' %s "%s"' % (arg, os.path.abspath(file)) + continue + cmdline += ' '+arg + if self.graph_filter != 'do_one_initcall': + cmdline += ' -func "%s"' % self.graph_filter + cmdline += ' -o "%s"' % os.path.abspath(self.testdir) + return cmdline + def manualRebootRequired(self): + cmdline = self.kernelParams() + pprint('To generate a new timeline manually, follow these steps:\n\n'\ + '1. Add the CMDLINE string to your kernel command line.\n'\ + '2. Reboot the system.\n'\ + '3. After reboot, re-run this tool with the same arguments but no command (w/o -reboot or -manual).\n\n'\ + 'CMDLINE="%s"' % cmdline) + sys.exit() + def blGrub(self): + blcmd = '' + for cmd in ['update-grub', 'grub-mkconfig', 'grub2-mkconfig']: + if blcmd: + break + blcmd = self.getExec(cmd) + if not blcmd: + doError('[GRUB] missing update command') + if not os.path.exists('/etc/default/grub'): + doError('[GRUB] missing /etc/default/grub') + if 'grub2' in blcmd: + cfg = '/boot/grub2/grub.cfg' + else: + cfg = '/boot/grub/grub.cfg' + if not os.path.exists(cfg): + doError('[GRUB] missing %s' % cfg) + if 'update-grub' in blcmd: + self.blexec = [blcmd] + else: + self.blexec = [blcmd, '-o', cfg] + def getBootLoader(self): + if self.bootloader == 'grub': + self.blGrub() + else: + doError('unknown boot loader: %s' % self.bootloader) + def writeDatafileHeader(self, filename): + self.kparams = open('/proc/cmdline', 'r').read().strip() + fp = open(filename, 'w') + fp.write(self.teststamp+'\n') + fp.write(self.sysstamp+'\n') + fp.write('# command | %s\n' % self.cmdline) + fp.write('# kparams | %s\n' % self.kparams) + fp.close() + +sysvals = SystemValues() + +# Class: Data +# Description: +# The primary container for test data. 
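+#	 Boot data is divided into a 'kernel' and a 'user' phase; each initcall
+#	 parsed from the log becomes an entry in one of them via newAction().
+#	 A sketch with hypothetical values (phase, name, pid, start, end,
+#	 return value, duration in usecs):
+#
+#	   data = Data(0)
+#	   data.newAction('kernel', 'acpi_init', 1, 0.10, 0.25, 0, 150000)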
+class Data(aslib.Data): + dmesg = {} # root data structure + start = 0.0 # test start + end = 0.0 # test end + dmesgtext = [] # dmesg text file in memory + testnumber = 0 + idstr = '' + html_device_id = 0 + valid = False + tUserMode = 0.0 + boottime = '' + phases = ['kernel', 'user'] + do_one_initcall = False + def __init__(self, num): + self.testnumber = num + self.idstr = 'a' + self.dmesgtext = [] + self.dmesg = { + 'kernel': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, + 'order': 0, 'color': 'linear-gradient(to bottom, #fff, #bcf)'}, + 'user': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, + 'order': 1, 'color': '#fff'} + } + def deviceTopology(self): + return '' + def newAction(self, phase, name, pid, start, end, ret, ulen): + # new device callback for a specific phase + self.html_device_id += 1 + devid = '%s%d' % (self.idstr, self.html_device_id) + list = self.dmesg[phase]['list'] + length = -1.0 + if(start >= 0 and end >= 0): + length = end - start + i = 2 + origname = name + while(name in list): + name = '%s[%d]' % (origname, i) + i += 1 + list[name] = {'name': name, 'start': start, 'end': end, + 'pid': pid, 'length': length, 'row': 0, 'id': devid, + 'ret': ret, 'ulen': ulen } + return name + def deviceMatch(self, pid, cg): + if cg.end - cg.start == 0: + return '' + for p in data.phases: + list = self.dmesg[p]['list'] + for devname in list: + dev = list[devname] + if pid != dev['pid']: + continue + if cg.name == 'do_one_initcall': + if(cg.start <= dev['start'] and cg.end >= dev['end'] and dev['length'] > 0): + dev['ftrace'] = cg + self.do_one_initcall = True + return devname + else: + if(cg.start > dev['start'] and cg.end < dev['end']): + if 'ftraces' not in dev: + dev['ftraces'] = [] + dev['ftraces'].append(cg) + return devname + return '' + def printDetails(self): + sysvals.vprint('Timeline Details:') + sysvals.vprint(' Host: %s' % sysvals.hostname) + sysvals.vprint(' Kernel: %s' % sysvals.kernel) + sysvals.vprint(' Test time: %s' % sysvals.testtime) + sysvals.vprint(' Boot time: %s' % self.boottime) + for phase in self.phases: + dc = len(self.dmesg[phase]['list']) + sysvals.vprint('%9s mode: %.3f - %.3f (%d initcalls)' % (phase, + self.dmesg[phase]['start']*1000, + self.dmesg[phase]['end']*1000, dc)) + +# ----------------- FUNCTIONS -------------------- + +# Function: parseKernelLog +# Description: +# parse a kernel log for boot data +def parseKernelLog(): + sysvals.vprint('Analyzing the dmesg data (%s)...' 
% \
+		os.path.basename(sysvals.dmesgfile))
+	phase = 'kernel'
+	data = Data(0)
+	data.dmesg['kernel']['start'] = data.start = ktime = 0.0
+	sysvals.stamp = {
+		'time': datetime.now().strftime('%B %d %Y, %I:%M:%S %p'),
+		'host': sysvals.hostname,
+		'mode': 'boot', 'kernel': ''}
+
+	tp = aslib.TestProps()
+	devtemp = dict()
+	if(sysvals.dmesgfile):
+		lf = open(sysvals.dmesgfile, 'rb')
+	else:
+		lf = Popen('dmesg', stdout=PIPE).stdout
+	for line in lf:
+		line = aslib.ascii(line).replace('\r\n', '')
+		# grab the stamp and sysinfo
+		if re.match(tp.stampfmt, line):
+			tp.stamp = line
+			continue
+		elif re.match(tp.sysinfofmt, line):
+			tp.sysinfo = line
+			continue
+		elif re.match(tp.cmdlinefmt, line):
+			tp.cmdline = line
+			continue
+		elif re.match(tp.kparamsfmt, line):
+			tp.kparams = line
+			continue
+		idx = line.find('[')
+		if idx > 1:
+			line = line[idx:]
+		m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
+		if(not m):
+			continue
+		ktime = float(m.group('ktime'))
+		if(ktime > 120):
+			break
+		msg = m.group('msg')
+		data.dmesgtext.append(line)
+		if(ktime == 0.0 and re.match('^Linux version .*', msg)):
+			if(not sysvals.stamp['kernel']):
+				sysvals.stamp['kernel'] = sysvals.kernelVersion(msg)
+			continue
+		m = re.match('.* setting system clock to (?P<d>[0-9\-]*)[ A-Z](?P<t>[0-9:]*) UTC.*', msg)
+		if(m):
+			bt = datetime.strptime(m.group('d')+' '+m.group('t'), '%Y-%m-%d %H:%M:%S')
+			bt = bt - timedelta(seconds=int(ktime))
+			data.boottime = bt.strftime('%Y-%m-%d_%H:%M:%S')
+			sysvals.stamp['time'] = bt.strftime('%B %d %Y, %I:%M:%S %p')
+			continue
+		m = re.match('^calling *(?P<f>.*)\+.* @ (?P<p>[0-9]*)', msg)
+		if(m):
+			func = m.group('f')
+			pid = int(m.group('p'))
+			devtemp[func] = (ktime, pid)
+			continue
+		m = re.match('^initcall *(?P<f>.*)\+.* returned (?P<r>.*) after (?P<t>.*) usecs', msg)
+		if(m):
+			data.valid = True
+			data.end = ktime
+			f, r, t = m.group('f', 'r', 't')
+			if(f in devtemp):
+				start, pid = devtemp[f]
+				data.newAction(phase, f, pid, start, ktime, int(r), int(t))
+				del devtemp[f]
+			continue
+		if(re.match('^Freeing unused kernel .*', msg)):
+			data.tUserMode = ktime
+			data.dmesg['kernel']['end'] = ktime
+			data.dmesg['user']['start'] = ktime
+			phase = 'user'
+
+	if tp.stamp:
+		sysvals.stamp = 0
+		tp.parseStamp(data, sysvals)
+	data.dmesg['user']['end'] = data.end
+	lf.close()
+	return data
+
+# Function: parseTraceLog
+# Description:
+#	 Check if trace is available and copy to a temp file
+def parseTraceLog(data):
+	sysvals.vprint('Analyzing the ftrace data (%s)...' % \
+		os.path.basename(sysvals.ftracefile))
+	# if available, calculate cgfilter allowable ranges
+	cgfilter = []
+	if len(sysvals.cgfilter) > 0:
+		for p in data.phases:
+			list = data.dmesg[p]['list']
+			for i in sysvals.cgfilter:
+				if i in list:
+					cgfilter.append([list[i]['start']-0.0001,
+						list[i]['end']+0.0001])
+	# parse the trace log
+	ftemp = dict()
+	tp = aslib.TestProps()
+	tp.setTracerType('function_graph')
+	tf = open(sysvals.ftracefile, 'r')
+	for line in tf:
+		if line[0] == '#':
+			continue
+		m = re.match(tp.ftrace_line_fmt, line.strip())
+		if(not m):
+			continue
+		m_time, m_proc, m_pid, m_msg, m_dur = \
+			m.group('time', 'proc', 'pid', 'msg', 'dur')
+		t = float(m_time)
+		if len(cgfilter) > 0:
+			allow = False
+			for r in cgfilter:
+				if t >= r[0] and t < r[1]:
+					allow = True
+					break
+			if not allow:
+				continue
+		if t > data.end:
+			break
+		if(m_time and m_pid and m_msg):
+			t = aslib.FTraceLine(m_time, m_msg, m_dur)
+			pid = int(m_pid)
+		else:
+			continue
+		if t.fevent or t.fkprobe:
+			continue
+		key = (m_proc, pid)
+		if(key not in ftemp):
+			ftemp[key] = []
+			ftemp[key].append(aslib.FTraceCallGraph(pid, sysvals))
+		cg = ftemp[key][-1]
+		res = cg.addLine(t)
+		if(res != 0):
+			ftemp[key].append(aslib.FTraceCallGraph(pid, sysvals))
+		if(res == -1):
+			ftemp[key][-1].addLine(t)
+
+	tf.close()
+
+	# add the callgraph data to the device hierarchy
+	for key in ftemp:
+		proc, pid = key
+		for cg in ftemp[key]:
+			if len(cg.list) < 1 or cg.invalid or (cg.end - cg.start == 0):
+				continue
+			if(not cg.postProcess()):
+				pprint('Sanity check failed for %s-%d' % (proc, pid))
+				continue
+			# match cg data to devices
+			devname = data.deviceMatch(pid, cg)
+			if not devname:
+				kind = 'Orphan'
+				if cg.partial:
+					kind = 'Partial'
+				sysvals.vprint('%s callgraph found for %s %s-%d [%f - %f]' %\
+					(kind, cg.name, proc, pid, cg.start, cg.end))
+			elif len(cg.list) > 1000000:
+				pprint('WARNING: the callgraph found for %s is massive!
(%d lines)' %\
+					(devname, len(cg.list)))
+
+# Function: retrieveLogs
+# Description:
+#	 Create copies of dmesg and/or ftrace for later processing
+def retrieveLogs():
+	# check ftrace is configured first
+	if sysvals.useftrace:
+		tracer = sysvals.fgetVal('current_tracer').strip()
+		if tracer != 'function_graph':
+			doError('ftrace not configured for a boot callgraph')
+	# create the folder and get dmesg
+	sysvals.systemInfo(aslib.dmidecode(sysvals.mempath))
+	sysvals.initTestOutput('boot')
+	sysvals.writeDatafileHeader(sysvals.dmesgfile)
+	call('dmesg >> '+sysvals.dmesgfile, shell=True)
+	if not sysvals.useftrace:
+		return
+	# get ftrace
+	sysvals.writeDatafileHeader(sysvals.ftracefile)
+	call('cat '+sysvals.tpath+'trace >> '+sysvals.ftracefile, shell=True)
+
+# Function: colorForName
+# Description:
+#	 Generate a repeatable color from a list for a given name
def colorForName(name):
+	list = [
+		('c1', '#ec9999'),
+		('c2', '#ffc1a6'),
+		('c3', '#fff0a6'),
+		('c4', '#adf199'),
+		('c5', '#9fadea'),
+		('c6', '#a699c1'),
+		('c7', '#ad99b4'),
+		('c8', '#eaffea'),
+		('c9', '#dcecfb'),
+		('c10', '#ffffea')
+	]
+	i = 0
+	total = 0
+	count = len(list)
+	while i < len(name):
+		total += ord(name[i])
+		i += 1
+	return list[total % count]
+
+def cgOverview(cg, minlen):
+	stats = dict()
+	large = []
+	for l in cg.list:
+		if l.fcall and l.depth == 1:
+			if l.length >= minlen:
+				large.append(l)
+			if l.name not in stats:
+				stats[l.name] = [0, 0.0]
+			stats[l.name][0] += (l.length * 1000.0)
+			stats[l.name][1] += 1
+	return (large, stats)
+
+# Function: createBootGraph
+# Description:
+#	 Create the output html file from the resident test data
+# Arguments:
+#	 testruns: array of Data objects from parseKernelLog or parseTraceLog
+# Output:
+#	 True if the html file was created, false if it failed
+def createBootGraph(data):
+	# html function templates
+	html_srccall = '<div id={6} title="{5}" class="srccall" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;">{0}</div>\n'
+	html_timetotal = '<table class="time1">\n<tr>'\
+		'<td class="blue">Init process starts @ <b>{0} ms</b></td>'\
+		'<td class="blue">Last initcall ends @ <b>{1} ms</b></td>'\
+		'</tr>\n</table>
\n' + + # device timeline + devtl = aslib.Timeline(100, 20) + + # write the test title and general info header + devtl.createHeader(sysvals, sysvals.stamp) + + # Generate the header for this timeline + t0 = data.start + tMax = data.end + tTotal = tMax - t0 + if(tTotal == 0): + pprint('ERROR: No timeline data') + return False + user_mode = '%.0f'%(data.tUserMode*1000) + last_init = '%.0f'%(tTotal*1000) + devtl.html += html_timetotal.format(user_mode, last_init) + + # determine the maximum number of rows we need to draw + devlist = [] + for p in data.phases: + list = data.dmesg[p]['list'] + for devname in list: + d = aslib.DevItem(0, p, list[devname]) + devlist.append(d) + devtl.getPhaseRows(devlist, 0, 'start') + devtl.calcTotalRows() + + # draw the timeline background + devtl.createZoomBox() + devtl.html += devtl.html_tblock.format('boot', '0', '100', devtl.scaleH) + for p in data.phases: + phase = data.dmesg[p] + length = phase['end']-phase['start'] + left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal) + width = '%.3f' % ((length*100.0)/tTotal) + devtl.html += devtl.html_phase.format(left, width, \ + '%.3f'%devtl.scaleH, '%.3f'%devtl.bodyH, \ + phase['color'], '') + + # draw the device timeline + num = 0 + devstats = dict() + for phase in data.phases: + list = data.dmesg[phase]['list'] + for devname in sorted(list): + cls, color = colorForName(devname) + dev = list[devname] + info = '@|%.3f|%.3f|%.3f|%d' % (dev['start']*1000.0, dev['end']*1000.0, + dev['ulen']/1000.0, dev['ret']) + devstats[dev['id']] = {'info':info} + dev['color'] = color + height = devtl.phaseRowHeight(0, phase, dev['row']) + top = '%.6f' % ((dev['row']*height) + devtl.scaleH) + left = '%.6f' % (((dev['start']-t0)*100)/tTotal) + width = '%.6f' % (((dev['end']-dev['start'])*100)/tTotal) + length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000) + devtl.html += devtl.html_device.format(dev['id'], + devname+length+phase+'_mode', left, top, '%.3f'%height, + width, devname, ' '+cls, '') + rowtop = devtl.phaseRowTop(0, phase, dev['row']) + height = '%.6f' % (devtl.rowH / 2) + top = '%.6f' % (rowtop + devtl.scaleH + (devtl.rowH / 2)) + if data.do_one_initcall: + if('ftrace' not in dev): + continue + cg = dev['ftrace'] + large, stats = cgOverview(cg, 0.001) + devstats[dev['id']]['fstat'] = stats + for l in large: + left = '%f' % (((l.time-t0)*100)/tTotal) + width = '%f' % (l.length*100/tTotal) + title = '%s (%0.3fms)' % (l.name, l.length * 1000.0) + devtl.html += html_srccall.format(l.name, left, + top, height, width, title, 'x%d'%num) + num += 1 + continue + if('ftraces' not in dev): + continue + for cg in dev['ftraces']: + left = '%f' % (((cg.start-t0)*100)/tTotal) + width = '%f' % ((cg.end-cg.start)*100/tTotal) + cglen = (cg.end - cg.start) * 1000.0 + title = '%s (%0.3fms)' % (cg.name, cglen) + cg.id = 'x%d' % num + devtl.html += html_srccall.format(cg.name, left, + top, height, width, title, dev['id']+cg.id) + num += 1 + + # draw the time scale, try to make the number of labels readable + devtl.createTimeScale(t0, tMax, tTotal, 'boot') + devtl.html += '\n' + + # timeline is finished + devtl.html += '\n\n' + + # draw a legend which describes the phases by color + devtl.html += '
\n' + pdelta = 20.0 + pmargin = 36.0 + for phase in data.phases: + order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin) + devtl.html += devtl.html_legend.format(order, \ + data.dmesg[phase]['color'], phase+'_mode', phase[0]) + devtl.html += '
\n' + + hf = open(sysvals.htmlfile, 'w') + + # add the css + extra = '\ + .c1 {background:rgba(209,0,0,0.4);}\n\ + .c2 {background:rgba(255,102,34,0.4);}\n\ + .c3 {background:rgba(255,218,33,0.4);}\n\ + .c4 {background:rgba(51,221,0,0.4);}\n\ + .c5 {background:rgba(17,51,204,0.4);}\n\ + .c6 {background:rgba(34,0,102,0.4);}\n\ + .c7 {background:rgba(51,0,68,0.4);}\n\ + .c8 {background:rgba(204,255,204,0.4);}\n\ + .c9 {background:rgba(169,208,245,0.4);}\n\ + .c10 {background:rgba(255,255,204,0.4);}\n\ + .vt {transform:rotate(-60deg);transform-origin:0 0;}\n\ + table.fstat {table-layout:fixed;padding:150px 15px 0 0;font-size:10px;column-width:30px;}\n\ + .fstat th {width:55px;}\n\ + .fstat td {text-align:left;width:35px;}\n\ + .srccall {position:absolute;font-size:10px;z-index:7;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,#CCC,#969696);}\n\ + .srccall:hover {color:white;font-weight:bold;border:1px solid white;}\n' + aslib.addCSS(hf, sysvals, 1, False, extra) + + # write the device timeline + hf.write(devtl.html) + + # add boot specific html + statinfo = 'var devstats = {\n' + for n in sorted(devstats): + statinfo += '\t"%s": [\n\t\t"%s",\n' % (n, devstats[n]['info']) + if 'fstat' in devstats[n]: + funcs = devstats[n]['fstat'] + for f in sorted(funcs, key=lambda k:(funcs[k], k), reverse=True): + if funcs[f][0] < 0.01 and len(funcs) > 10: + break + statinfo += '\t\t"%f|%s|%d",\n' % (funcs[f][0], f, funcs[f][1]) + statinfo += '\t],\n' + statinfo += '};\n' + html = \ + '
\n'\ + '\n'\ + '\n' + hf.write(html) + + # add the callgraph html + if(sysvals.usecallgraph): + aslib.addCallgraphs(sysvals, hf, data) + + # add the test log as a hidden div + if sysvals.testlog and sysvals.logmsg: + hf.write('\n') + # add the dmesg log as a hidden div + if sysvals.dmesglog: + hf.write('\n') + + # write the footer and close + aslib.addScriptCode(hf, [data]) + hf.write('\n\n') + hf.close() + return True + +# Function: updateCron +# Description: +# (restore=False) Set the tool to run automatically on reboot +# (restore=True) Restore the original crontab +def updateCron(restore=False): + if not restore: + sysvals.rootUser(True) + crondir = '/var/spool/cron/crontabs/' + if not os.path.exists(crondir): + crondir = '/var/spool/cron/' + if not os.path.exists(crondir): + doError('%s not found' % crondir) + cronfile = crondir+'root' + backfile = crondir+'root-analyze_boot-backup' + cmd = sysvals.getExec('crontab') + if not cmd: + doError('crontab not found') + # on restore: move the backup cron back into place + if restore: + if os.path.exists(backfile): + shutil.move(backfile, cronfile) + call([cmd, cronfile]) + return + # backup current cron and install new one with reboot + if os.path.exists(cronfile): + shutil.move(cronfile, backfile) + else: + fp = open(backfile, 'w') + fp.close() + res = -1 + try: + fp = open(backfile, 'r') + op = open(cronfile, 'w') + for line in fp: + if not sysvals.myCronJob(line): + op.write(line) + continue + fp.close() + op.write('@reboot python %s\n' % sysvals.cronjobCmdString()) + op.close() + res = call([cmd, cronfile]) + except Exception as e: + pprint('Exception: %s' % str(e)) + shutil.move(backfile, cronfile) + res = -1 + if res != 0: + doError('crontab failed') + +# Function: updateGrub +# Description: +# update grub.cfg for all kernels with our parameters +def updateGrub(restore=False): + # call update-grub on restore + if restore: + try: + call(sysvals.blexec, stderr=PIPE, stdout=PIPE, + env={'PATH': '.:/sbin:/usr/sbin:/usr/bin:/sbin:/bin'}) + except Exception as e: + pprint('Exception: %s\n' % str(e)) + return + # extract the option and create a grub config without it + sysvals.rootUser(True) + tgtopt = 'GRUB_CMDLINE_LINUX_DEFAULT' + cmdline = '' + grubfile = '/etc/default/grub' + tempfile = '/etc/default/grub.analyze_boot' + shutil.move(grubfile, tempfile) + res = -1 + try: + fp = open(tempfile, 'r') + op = open(grubfile, 'w') + cont = False + for line in fp: + line = line.strip() + if len(line) == 0 or line[0] == '#': + continue + opt = line.split('=')[0].strip() + if opt == tgtopt: + cmdline = line.split('=', 1)[1].strip('\\') + if line[-1] == '\\': + cont = True + elif cont: + cmdline += line.strip('\\') + if line[-1] != '\\': + cont = False + else: + op.write('%s\n' % line) + fp.close() + # if the target option value is in quotes, strip them + sp = '"' + val = cmdline.strip() + if val and (val[0] == '\'' or val[0] == '"'): + sp = val[0] + val = val.strip(sp) + cmdline = val + # append our cmd line options + if len(cmdline) > 0: + cmdline += ' ' + cmdline += sysvals.kernelParams() + # write out the updated target option + op.write('\n%s=%s%s%s\n' % (tgtopt, sp, cmdline, sp)) + op.close() + res = call(sysvals.blexec) + os.remove(grubfile) + except Exception as e: + pprint('Exception: %s' % str(e)) + res = -1 + # cleanup + shutil.move(tempfile, grubfile) + if res != 0: + doError('update grub failed') + +# Function: updateKernelParams +# Description: +# update boot conf for all kernels with our parameters +def 
updateKernelParams(restore=False):
+	# find the boot loader
+	sysvals.getBootLoader()
+	if sysvals.bootloader == 'grub':
+		updateGrub(restore)
+
+# Function: doError
+# Description:
+#	 generic error function for catastrophic failures
+# Arguments:
+#	 msg: the error message to print
+#	 help: True if printHelp should be called after, False otherwise
+def doError(msg, help=False):
+	if help == True:
+		printHelp()
+	pprint('ERROR: %s\n' % msg)
+	sysvals.outputResult({'error':msg})
+	sys.exit()
+
+# Function: printHelp
+# Description:
+#	 print out the help text
+def printHelp():
+	pprint('\n%s v%s\n'\
+	'Usage: bootgraph <options> <command>\n'\
+	'\n'\
+	'Description:\n'\
+	'  This tool reads in a dmesg log of linux kernel boot and\n'\
+	'  creates an html representation of the boot timeline up to\n'\
+	'  the start of the init process.\n'\
+	'\n'\
+	'  If no specific command is given the tool reads the current dmesg\n'\
+	'  and/or ftrace log and creates a timeline\n'\
+	'\n'\
+	'  Generates output files in subdirectory: boot-yymmdd-HHMMSS\n'\
+	'   HTML output:                    <hostname>_boot.html\n'\
+	'   raw dmesg output:               <hostname>_boot_dmesg.txt\n'\
+	'   raw ftrace output:              <hostname>_boot_ftrace.txt\n'\
+	'\n'\
+	'Options:\n'\
+	'  -h            Print this help text\n'\
+	'  -v            Print the current tool version\n'\
+	'  -verbose      Print extra information during execution and analysis\n'\
+	'  -addlogs      Add the dmesg log to the html output\n'\
+	'  -result fn    Export a results table to a text file for parsing.\n'\
+	'  -o name       Overrides the output subdirectory name when running a new test\n'\
+	'                default: boot-{date}-{time}\n'\
+	' [advanced]\n'\
+	'  -fstat        Use ftrace to add function detail and statistics (default: disabled)\n'\
+	'  -f/-callgraph Add callgraph detail, can be very large (default: disabled)\n'\
+	'  -maxdepth N   limit the callgraph data to N call levels (default: 2)\n'\
+	'  -mincg ms     Discard all callgraphs shorter than ms milliseconds (e.g. 
0.001 for us)\n'\ + ' -timeprec N Number of significant digits in timestamps (0:S, 3:ms, [6:us])\n'\ + ' -expandcg pre-expand the callgraph data in the html output (default: disabled)\n'\ + ' -func list Limit ftrace to comma-delimited list of functions (default: do_one_initcall)\n'\ + ' -cgfilter S Filter the callgraph output in the timeline\n'\ + ' -cgskip file Callgraph functions to skip, off to disable (default: cgskip.txt)\n'\ + ' -bl name Use the following boot loader for kernel params (default: grub)\n'\ + ' -reboot Reboot the machine automatically and generate a new timeline\n'\ + ' -manual Show the steps to generate a new timeline manually (used with -reboot)\n'\ + '\n'\ + 'Other commands:\n'\ + ' -flistall Print all functions capable of being captured in ftrace\n'\ + ' -sysinfo Print out system info extracted from BIOS\n'\ + ' -which exec Print an executable path, should function even without PATH\n'\ + ' [redo]\n'\ + ' -dmesg file Create HTML output using dmesg input (used with -ftrace)\n'\ + ' -ftrace file Create HTML output using ftrace input (used with -dmesg)\n'\ + '' % (sysvals.title, sysvals.version)) + return True + +# ----------------- MAIN -------------------- +# exec start (skipped if script is loaded as library) +if __name__ == '__main__': + # loop through the command line arguments + cmd = '' + testrun = True + switchoff = ['disable', 'off', 'false', '0'] + simplecmds = ['-sysinfo', '-kpupdate', '-flistall', '-checkbl'] + cgskip = '' + if '-f' in sys.argv: + cgskip = sysvals.configFile('cgskip.txt') + args = iter(sys.argv[1:]) + mdset = False + for arg in args: + if(arg == '-h'): + printHelp() + sys.exit() + elif(arg == '-v'): + pprint("Version %s" % sysvals.version) + sys.exit() + elif(arg == '-verbose'): + sysvals.verbose = True + elif(arg in simplecmds): + cmd = arg[1:] + elif(arg == '-fstat'): + sysvals.useftrace = True + elif(arg == '-callgraph' or arg == '-f'): + sysvals.useftrace = True + sysvals.usecallgraph = True + elif(arg == '-cgdump'): + sysvals.cgdump = True + elif(arg == '-mincg'): + sysvals.mincglen = aslib.getArgFloat('-mincg', args, 0.0, 10000.0) + elif(arg == '-cgfilter'): + try: + val = next(args) + except: + doError('No callgraph functions supplied', True) + sysvals.setCallgraphFilter(val) + elif(arg == '-cgskip'): + try: + val = next(args) + except: + doError('No file supplied', True) + if val.lower() in switchoff: + cgskip = '' + else: + cgskip = sysvals.configFile(val) + if(not cgskip): + doError('%s does not exist' % cgskip) + elif(arg == '-bl'): + try: + val = next(args) + except: + doError('No boot loader name supplied', True) + if val.lower() not in ['grub']: + doError('Unknown boot loader: %s' % val, True) + sysvals.bootloader = val.lower() + elif(arg == '-timeprec'): + sysvals.setPrecision(aslib.getArgInt('-timeprec', args, 0, 6)) + elif(arg == '-maxdepth'): + mdset = True + sysvals.max_graph_depth = aslib.getArgInt('-maxdepth', args, 0, 1000) + elif(arg == '-func'): + try: + val = next(args) + except: + doError('No filter functions supplied', True) + sysvals.useftrace = True + sysvals.usecallgraph = True + sysvals.rootCheck(True) + sysvals.setGraphFilter(val) + elif(arg == '-ftrace'): + try: + val = next(args) + except: + doError('No ftrace file supplied', True) + if(os.path.exists(val) == False): + doError('%s does not exist' % val) + testrun = False + sysvals.ftracefile = val + elif(arg == '-addlogs'): + sysvals.dmesglog = True + elif(arg == '-expandcg'): + sysvals.cgexp = True + elif(arg == '-dmesg'): + try: + val = next(args) + 
except: + doError('No dmesg file supplied', True) + if(os.path.exists(val) == False): + doError('%s does not exist' % val) + testrun = False + sysvals.dmesgfile = val + elif(arg == '-o'): + try: + val = next(args) + except: + doError('No subdirectory name supplied', True) + sysvals.testdir = sysvals.setOutputFolder(val) + elif(arg == '-result'): + try: + val = next(args) + except: + doError('No result file supplied', True) + sysvals.result = val + elif(arg == '-reboot'): + sysvals.reboot = True + elif(arg == '-manual'): + sysvals.reboot = True + sysvals.manual = True + # remaining options are only for cron job use + elif(arg == '-cronjob'): + sysvals.iscronjob = True + elif(arg == '-which'): + try: + val = next(args) + except: + doError('No executable supplied', True) + out = sysvals.getExec(val) + if not out: + print('%s not found' % val) + sys.exit(1) + print(out) + sys.exit(0) + else: + doError('Invalid argument: '+arg, True) + + # compatibility errors and access checks + if(sysvals.iscronjob and (sysvals.reboot or \ + sysvals.dmesgfile or sysvals.ftracefile or cmd)): + doError('-cronjob is meant for batch purposes only') + if(sysvals.reboot and (sysvals.dmesgfile or sysvals.ftracefile)): + doError('-reboot and -dmesg/-ftrace are incompatible') + if cmd or sysvals.reboot or sysvals.iscronjob or testrun: + sysvals.rootCheck(True) + if (testrun and sysvals.useftrace) or cmd == 'flistall': + if not sysvals.verifyFtrace(): + doError('Ftrace is not properly enabled') + + # run utility commands + sysvals.cpuInfo() + if cmd != '': + if cmd == 'kpupdate': + updateKernelParams() + elif cmd == 'flistall': + for f in sysvals.getBootFtraceFilterFunctions(): + print(f) + elif cmd == 'checkbl': + sysvals.getBootLoader() + pprint('Boot Loader: %s\n%s' % (sysvals.bootloader, sysvals.blexec)) + elif(cmd == 'sysinfo'): + sysvals.printSystemInfo(True) + sys.exit() + + # reboot: update grub, setup a cronjob, and reboot + if sysvals.reboot: + if (sysvals.useftrace or sysvals.usecallgraph) and \ + not sysvals.checkFtraceKernelVersion(): + doError('Ftrace functionality requires kernel v4.10 or newer') + if not sysvals.manual: + updateKernelParams() + updateCron() + call('reboot') + else: + sysvals.manualRebootRequired() + sys.exit() + + if sysvals.usecallgraph and cgskip: + sysvals.vprint('Using cgskip file: %s' % cgskip) + sysvals.setCallgraphBlacklist(cgskip) + + # cronjob: remove the cronjob, grub changes, and disable ftrace + if sysvals.iscronjob: + updateCron(True) + updateKernelParams(True) + try: + sysvals.fsetVal('0', 'tracing_on') + except: + pass + + # testrun: generate copies of the logs + if testrun: + retrieveLogs() + else: + sysvals.setOutputFile() + + # process the log data + if sysvals.dmesgfile: + if not mdset: + sysvals.max_graph_depth = 0 + data = parseKernelLog() + if(not data.valid): + doError('No initcall data found in %s' % sysvals.dmesgfile) + if sysvals.useftrace and sysvals.ftracefile: + parseTraceLog(data) + if sysvals.cgdump: + data.debugPrint() + sys.exit() + else: + doError('dmesg file required') + + sysvals.vprint('Creating the html timeline (%s)...' 
% sysvals.htmlfile) + sysvals.vprint('Command:\n %s' % sysvals.cmdline) + sysvals.vprint('Kernel parameters:\n %s' % sysvals.kparams) + data.printDetails() + createBootGraph(data) + + # if running as root, change output dir owner to sudo_user + if testrun and os.path.isdir(sysvals.testdir) and \ + os.getuid() == 0 and 'SUDO_USER' in os.environ: + cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1' + call(cmd.format(os.environ['SUDO_USER'], sysvals.testdir), shell=True) + + sysvals.stamp['boot'] = (data.tUserMode - data.start) * 1000 + sysvals.stamp['lastinit'] = data.end * 1000 + sysvals.outputResult(sysvals.stamp) diff --git a/data/linux-small/tools/power/pm-graph/sleepgraph.py b/data/linux-small/tools/power/pm-graph/sleepgraph.py new file mode 100755 index 0000000..ffd5095 --- /dev/null +++ b/data/linux-small/tools/power/pm-graph/sleepgraph.py @@ -0,0 +1,6969 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-only +# +# Tool for analyzing suspend/resume timing +# Copyright (c) 2013, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# Authors: +# Todd Brandt +# +# Links: +# Home Page +# https://01.org/pm-graph +# Source repo +# git@github.com:intel/pm-graph +# +# Description: +# This tool is designed to assist kernel and OS developers in optimizing +# their linux stack's suspend/resume time. Using a kernel image built +# with a few extra options enabled, the tool will execute a suspend and +# will capture dmesg and ftrace data until resume is complete. This data +# is transformed into a device timeline and a callgraph to give a quick +# and detailed view of which devices and callbacks are taking the most +# time in suspend/resume. The output is a single html file which can be +# viewed in firefox or chrome. +# +# The following kernel build options are required: +# CONFIG_DEVMEM=y +# CONFIG_PM_DEBUG=y +# CONFIG_PM_SLEEP_DEBUG=y +# CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER=y +# CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_KPROBES=y +# CONFIG_KPROBES_ON_FTRACE=y +# +# For kernel versions older than 3.15: +# The following additional kernel parameters are required: +# (e.g. in file /etc/default/grub) +# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..." 
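+# A quick way to verify the build options above before a run
+# (illustrative commands, config path varies by distro):
+#   grep -E 'CONFIG_FUNCTION_GRAPH_TRACER|CONFIG_KPROBES' /boot/config-$(uname -r)
+#   grep -c initcall_debug /proc/cmdline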
+# + +# ----------------- LIBRARIES -------------------- + +import sys +import time +import os +import string +import re +import platform +import signal +import codecs +from datetime import datetime, timedelta +import struct +import configparser +import gzip +from threading import Thread +from subprocess import call, Popen, PIPE +import base64 + +def pprint(msg): + print(msg) + sys.stdout.flush() + +def ascii(text): + return text.decode('ascii', 'ignore') + +# ----------------- CLASSES -------------------- + +# Class: SystemValues +# Description: +# A global, single-instance container used to +# store system values and test parameters +class SystemValues: + title = 'SleepGraph' + version = '5.8' + ansi = False + rs = 0 + display = '' + gzip = False + sync = False + wifi = False + verbose = False + testlog = True + dmesglog = True + ftracelog = False + acpidebug = True + tstat = True + mindevlen = 0.0001 + mincglen = 0.0 + cgphase = '' + cgtest = -1 + cgskip = '' + maxfail = 0 + multitest = {'run': False, 'count': 1000000, 'delay': 0} + max_graph_depth = 0 + callloopmaxgap = 0.0001 + callloopmaxlen = 0.005 + bufsize = 0 + cpucount = 0 + memtotal = 204800 + memfree = 204800 + srgap = 0 + cgexp = False + testdir = '' + outdir = '' + tpath = '/sys/kernel/debug/tracing/' + fpdtpath = '/sys/firmware/acpi/tables/FPDT' + epath = '/sys/kernel/debug/tracing/events/power/' + pmdpath = '/sys/power/pm_debug_messages' + acpipath='/sys/module/acpi/parameters/debug_level' + traceevents = [ + 'suspend_resume', + 'wakeup_source_activate', + 'wakeup_source_deactivate', + 'device_pm_callback_end', + 'device_pm_callback_start' + ] + logmsg = '' + testcommand = '' + mempath = '/dev/mem' + powerfile = '/sys/power/state' + mempowerfile = '/sys/power/mem_sleep' + diskpowerfile = '/sys/power/disk' + suspendmode = 'mem' + memmode = '' + diskmode = '' + hostname = 'localhost' + prefix = 'test' + teststamp = '' + sysstamp = '' + dmesgstart = 0.0 + dmesgfile = '' + ftracefile = '' + htmlfile = 'output.html' + result = '' + rtcwake = True + rtcwaketime = 15 + rtcpath = '' + devicefilter = [] + cgfilter = [] + stamp = 0 + execcount = 1 + x2delay = 0 + skiphtml = False + usecallgraph = False + ftopfunc = 'pm_suspend' + ftop = False + usetraceevents = False + usetracemarkers = True + usekprobes = True + usedevsrc = False + useprocmon = False + notestrun = False + cgdump = False + devdump = False + mixedphaseheight = True + devprops = dict() + cfgdef = dict() + platinfo = [] + predelay = 0 + postdelay = 0 + tmstart = 'SUSPEND START %Y%m%d-%H:%M:%S.%f' + tmend = 'RESUME COMPLETE %Y%m%d-%H:%M:%S.%f' + tracefuncs = { + 'sys_sync': {}, + 'ksys_sync': {}, + '__pm_notifier_call_chain': {}, + 'pm_prepare_console': {}, + 'pm_notifier_call_chain': {}, + 'freeze_processes': {}, + 'freeze_kernel_threads': {}, + 'pm_restrict_gfp_mask': {}, + 'acpi_suspend_begin': {}, + 'acpi_hibernation_begin': {}, + 'acpi_hibernation_enter': {}, + 'acpi_hibernation_leave': {}, + 'acpi_pm_freeze': {}, + 'acpi_pm_thaw': {}, + 'acpi_s2idle_end': {}, + 'acpi_s2idle_sync': {}, + 'acpi_s2idle_begin': {}, + 'acpi_s2idle_prepare': {}, + 'acpi_s2idle_prepare_late': {}, + 'acpi_s2idle_wake': {}, + 'acpi_s2idle_wakeup': {}, + 'acpi_s2idle_restore': {}, + 'acpi_s2idle_restore_early': {}, + 'hibernate_preallocate_memory': {}, + 'create_basic_memory_bitmaps': {}, + 'swsusp_write': {}, + 'suspend_console': {}, + 'acpi_pm_prepare': {}, + 'syscore_suspend': {}, + 'arch_enable_nonboot_cpus_end': {}, + 'syscore_resume': {}, + 'acpi_pm_finish': {}, + 'resume_console': 
{}, + 'acpi_pm_end': {}, + 'pm_restore_gfp_mask': {}, + 'thaw_processes': {}, + 'pm_restore_console': {}, + 'CPU_OFF': { + 'func':'_cpu_down', + 'args_x86_64': {'cpu':'%di:s32'}, + 'format': 'CPU_OFF[{cpu}]' + }, + 'CPU_ON': { + 'func':'_cpu_up', + 'args_x86_64': {'cpu':'%di:s32'}, + 'format': 'CPU_ON[{cpu}]' + }, + } + dev_tracefuncs = { + # general wait/delay/sleep + 'msleep': { 'args_x86_64': {'time':'%di:s32'}, 'ub': 1 }, + 'schedule_timeout': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 }, + 'udelay': { 'func':'__const_udelay', 'args_x86_64': {'loops':'%di:s32'}, 'ub': 1 }, + 'usleep_range': { 'args_x86_64': {'min':'%di:s32', 'max':'%si:s32'}, 'ub': 1 }, + 'mutex_lock_slowpath': { 'func':'__mutex_lock_slowpath', 'ub': 1 }, + 'acpi_os_stall': {'ub': 1}, + 'rt_mutex_slowlock': {'ub': 1}, + # ACPI + 'acpi_resume_power_resources': {}, + 'acpi_ps_execute_method': { 'args_x86_64': { + 'fullpath':'+0(+40(%di)):string', + }}, + # mei_me + 'mei_reset': {}, + # filesystem + 'ext4_sync_fs': {}, + # 80211 + 'ath10k_bmi_read_memory': { 'args_x86_64': {'length':'%cx:s32'} }, + 'ath10k_bmi_write_memory': { 'args_x86_64': {'length':'%cx:s32'} }, + 'ath10k_bmi_fast_download': { 'args_x86_64': {'length':'%cx:s32'} }, + 'iwlagn_mac_start': {}, + 'iwlagn_alloc_bcast_station': {}, + 'iwl_trans_pcie_start_hw': {}, + 'iwl_trans_pcie_start_fw': {}, + 'iwl_run_init_ucode': {}, + 'iwl_load_ucode_wait_alive': {}, + 'iwl_alive_start': {}, + 'iwlagn_mac_stop': {}, + 'iwlagn_mac_suspend': {}, + 'iwlagn_mac_resume': {}, + 'iwlagn_mac_add_interface': {}, + 'iwlagn_mac_remove_interface': {}, + 'iwlagn_mac_change_interface': {}, + 'iwlagn_mac_config': {}, + 'iwlagn_configure_filter': {}, + 'iwlagn_mac_hw_scan': {}, + 'iwlagn_bss_info_changed': {}, + 'iwlagn_mac_channel_switch': {}, + 'iwlagn_mac_flush': {}, + # ATA + 'ata_eh_recover': { 'args_x86_64': {'port':'+36(%di):s32'} }, + # i915 + 'i915_gem_resume': {}, + 'i915_restore_state': {}, + 'intel_opregion_setup': {}, + 'g4x_pre_enable_dp': {}, + 'vlv_pre_enable_dp': {}, + 'chv_pre_enable_dp': {}, + 'g4x_enable_dp': {}, + 'vlv_enable_dp': {}, + 'intel_hpd_init': {}, + 'intel_opregion_register': {}, + 'intel_dp_detect': {}, + 'intel_hdmi_detect': {}, + 'intel_opregion_init': {}, + 'intel_fbdev_set_suspend': {}, + } + infocmds = [ + [0, 'kparams', 'cat', '/proc/cmdline'], + [0, 'mcelog', 'mcelog'], + [0, 'pcidevices', 'lspci', '-tv'], + [0, 'usbdevices', 'lsusb', '-t'], + [1, 'interrupts', 'cat', '/proc/interrupts'], + [1, 'wakeups', 'cat', '/sys/kernel/debug/wakeup_sources'], + [2, 'gpecounts', 'sh', '-c', 'grep -v invalid /sys/firmware/acpi/interrupts/*'], + [2, 'suspendstats', 'sh', '-c', 'grep -v invalid /sys/power/suspend_stats/*'], + [2, 'cpuidle', 'sh', '-c', 'grep -v invalid /sys/devices/system/cpu/cpu*/cpuidle/state*/s2idle/*'], + [2, 'battery', 'sh', '-c', 'grep -v invalid /sys/class/power_supply/*/*'], + ] + cgblacklist = [] + kprobes = dict() + timeformat = '%.3f' + cmdline = '%s %s' % \ + (os.path.basename(sys.argv[0]), ' '.join(sys.argv[1:])) + sudouser = '' + def __init__(self): + self.archargs = 'args_'+platform.machine() + self.hostname = platform.node() + if(self.hostname == ''): + self.hostname = 'localhost' + rtc = "rtc0" + if os.path.exists('/dev/rtc'): + rtc = os.readlink('/dev/rtc') + rtc = '/sys/class/rtc/'+rtc + if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \ + os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'): + self.rtcpath = rtc + if (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()): + self.ansi = True + 
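+# (Per-arch kprobe arguments, for illustration: on an x86_64 host,
+# self.archargs above is 'args_x86_64', so an entry such as CPU_ON
+# resolves its kprobe argument spec to {'cpu':'%di:s32'}; on any other
+# machine type the entry simply contributes no argument data.)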
self.testdir = datetime.now().strftime('suspend-%y%m%d-%H%M%S') + if os.getuid() == 0 and 'SUDO_USER' in os.environ and \ + os.environ['SUDO_USER']: + self.sudouser = os.environ['SUDO_USER'] + def resetlog(self): + self.logmsg = '' + self.platinfo = [] + def vprint(self, msg): + self.logmsg += msg+'\n' + if self.verbose or msg.startswith('WARNING:'): + pprint(msg) + def signalHandler(self, signum, frame): + if not self.result: + return + signame = self.signames[signum] if signum in self.signames else 'UNKNOWN' + msg = 'Signal %s caused a tool exit, line %d' % (signame, frame.f_lineno) + self.outputResult({'error':msg}) + sys.exit(3) + def signalHandlerInit(self): + capture = ['BUS', 'SYS', 'XCPU', 'XFSZ', 'PWR', 'HUP', 'INT', 'QUIT', + 'ILL', 'ABRT', 'FPE', 'SEGV', 'TERM'] + self.signames = dict() + for i in capture: + s = 'SIG'+i + try: + signum = getattr(signal, s) + signal.signal(signum, self.signalHandler) + except: + continue + self.signames[signum] = s + def rootCheck(self, fatal=True): + if(os.access(self.powerfile, os.W_OK)): + return True + if fatal: + msg = 'This command requires sysfs mount and root access' + pprint('ERROR: %s\n' % msg) + self.outputResult({'error':msg}) + sys.exit(1) + return False + def rootUser(self, fatal=False): + if 'USER' in os.environ and os.environ['USER'] == 'root': + return True + if fatal: + msg = 'This command must be run as root' + pprint('ERROR: %s\n' % msg) + self.outputResult({'error':msg}) + sys.exit(1) + return False + def usable(self, file): + return (os.path.exists(file) and os.path.getsize(file) > 0) + def getExec(self, cmd): + try: + fp = Popen(['which', cmd], stdout=PIPE, stderr=PIPE).stdout + out = ascii(fp.read()).strip() + fp.close() + except: + out = '' + if out: + return out + for path in ['/sbin', '/bin', '/usr/sbin', '/usr/bin', + '/usr/local/sbin', '/usr/local/bin']: + cmdfull = os.path.join(path, cmd) + if os.path.exists(cmdfull): + return cmdfull + return out + def setPrecision(self, num): + if num < 0 or num > 6: + return + self.timeformat = '%.{0}f'.format(num) + def setOutputFolder(self, value): + args = dict() + n = datetime.now() + args['date'] = n.strftime('%y%m%d') + args['time'] = n.strftime('%H%M%S') + args['hostname'] = args['host'] = self.hostname + args['mode'] = self.suspendmode + return value.format(**args) + def setOutputFile(self): + if self.dmesgfile != '': + m = re.match('(?P<name>.*)_dmesg\.txt.*', self.dmesgfile) + if(m): + self.htmlfile = m.group('name')+'.html' + if self.ftracefile != '': + m = re.match('(?P<name>.*)_ftrace\.txt.*', self.ftracefile) + if(m): + self.htmlfile = m.group('name')+'.html' + def systemInfo(self, info): + p = m = '' + if 'baseboard-manufacturer' in info: + m = info['baseboard-manufacturer'] + elif 'system-manufacturer' in info: + m = info['system-manufacturer'] + if 'system-product-name' in info: + p = info['system-product-name'] + elif 'baseboard-product-name' in info: + p = info['baseboard-product-name'] + if m[:5].lower() == 'intel' and 'baseboard-product-name' in info: + p = info['baseboard-product-name'] + c = info['processor-version'] if 'processor-version' in info else '' + b = info['bios-version'] if 'bios-version' in info else '' + r = info['bios-release-date'] if 'bios-release-date' in info else '' + self.sysstamp = '# sysinfo | man:%s | plat:%s | cpu:%s | bios:%s | biosdate:%s | numcpu:%d | memsz:%d | memfr:%d' % \ + (m, p, c, b, r, self.cpucount, self.memtotal, self.memfree) + def printSystemInfo(self, fatal=False): + self.rootCheck(True) + out = dmidecode(self.mempath, fatal) + if
len(out) < 1: + return + fmt = '%-24s: %s' + for name in sorted(out): + print(fmt % (name, out[name])) + print(fmt % ('cpucount', ('%d' % self.cpucount))) + print(fmt % ('memtotal', ('%d kB' % self.memtotal))) + print(fmt % ('memfree', ('%d kB' % self.memfree))) + def cpuInfo(self): + self.cpucount = 0 + fp = open('/proc/cpuinfo', 'r') + for line in fp: + if re.match('^processor[ \t]*:[ \t]*[0-9]*', line): + self.cpucount += 1 + fp.close() + fp = open('/proc/meminfo', 'r') + for line in fp: + m = re.match('^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line) + if m: + self.memtotal = int(m.group('sz')) + m = re.match('^MemFree:[ \t]*(?P<sz>[0-9]*) *kB', line) + if m: + self.memfree = int(m.group('sz')) + fp.close() + def initTestOutput(self, name): + self.prefix = self.hostname + v = open('/proc/version', 'r').read().strip() + kver = v.split()[2] + fmt = name+'-%m%d%y-%H%M%S' + testtime = datetime.now().strftime(fmt) + self.teststamp = \ + '# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver + ext = '' + if self.gzip: + ext = '.gz' + self.dmesgfile = \ + self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'+ext + self.ftracefile = \ + self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'+ext + self.htmlfile = \ + self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html' + if not os.path.isdir(self.testdir): + os.makedirs(self.testdir) + self.sudoUserchown(self.testdir) + def getValueList(self, value): + out = [] + for i in value.split(','): + if i.strip(): + out.append(i.strip()) + return out + def setDeviceFilter(self, value): + self.devicefilter = self.getValueList(value) + def setCallgraphFilter(self, value): + self.cgfilter = self.getValueList(value) + def skipKprobes(self, value): + for k in self.getValueList(value): + if k in self.tracefuncs: + del self.tracefuncs[k] + if k in self.dev_tracefuncs: + del self.dev_tracefuncs[k] + def setCallgraphBlacklist(self, file): + self.cgblacklist = self.listFromFile(file) + def rtcWakeAlarmOn(self): + call('echo 0 > '+self.rtcpath+'/wakealarm', shell=True) + nowtime = open(self.rtcpath+'/since_epoch', 'r').read().strip() + if nowtime: + nowtime = int(nowtime) + else: + # if hardware time fails, use the software time + nowtime = int(datetime.now().strftime('%s')) + alarm = nowtime + self.rtcwaketime + call('echo %d > %s/wakealarm' % (alarm, self.rtcpath), shell=True) + def rtcWakeAlarmOff(self): + call('echo 0 > %s/wakealarm' % self.rtcpath, shell=True) + def initdmesg(self): + # get the latest time stamp from the dmesg log + lines = Popen('dmesg', stdout=PIPE).stdout.readlines() + ktime = '0' + for line in reversed(lines): + line = ascii(line).replace('\r\n', '') + idx = line.find('[') + if idx > 1: + line = line[idx:] + m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line) + if(m): + ktime = m.group('ktime') + break + self.dmesgstart = float(ktime) + def getdmesg(self, testdata): + op = self.writeDatafileHeader(self.dmesgfile, testdata) + # store all new dmesg lines since initdmesg was called + fp = Popen('dmesg', stdout=PIPE).stdout + for line in fp: + line = ascii(line).replace('\r\n', '') + idx = line.find('[') + if idx > 1: + line = line[idx:] + m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line) + if(not m): + continue + ktime = float(m.group('ktime')) + if ktime > self.dmesgstart: + op.write(line) + fp.close() + op.close() + def listFromFile(self, file): + list = [] + fp = open(file) + for i in fp.read().split('\n'): + i = i.strip() + if i and i[0] != '#': + list.append(i) + fp.close() + return list + def
addFtraceFilterFunctions(self, file): + for i in self.listFromFile(file): + if len(i) < 2: + continue + self.tracefuncs[i] = dict() + def getFtraceFilterFunctions(self, current): + self.rootCheck(True) + if not current: + call('cat '+self.tpath+'available_filter_functions', shell=True) + return + master = self.listFromFile(self.tpath+'available_filter_functions') + for i in sorted(self.tracefuncs): + if 'func' in self.tracefuncs[i]: + i = self.tracefuncs[i]['func'] + if i in master: + print(i) + else: + print(self.colorText(i)) + def setFtraceFilterFunctions(self, list): + master = self.listFromFile(self.tpath+'available_filter_functions') + flist = '' + for i in list: + if i not in master: + continue + if ' [' in i: + flist += i.split(' ')[0]+'\n' + else: + flist += i+'\n' + fp = open(self.tpath+'set_graph_function', 'w') + fp.write(flist) + fp.close() + def basicKprobe(self, name): + self.kprobes[name] = {'name': name,'func': name,'args': dict(),'format': name} + def defaultKprobe(self, name, kdata): + k = kdata + for field in ['name', 'format', 'func']: + if field not in k: + k[field] = name + if self.archargs in k: + k['args'] = k[self.archargs] + else: + k['args'] = dict() + k['format'] = name + self.kprobes[name] = k + def kprobeColor(self, name): + if name not in self.kprobes or 'color' not in self.kprobes[name]: + return '' + return self.kprobes[name]['color'] + def kprobeDisplayName(self, name, dataraw): + if name not in self.kprobes: + self.basicKprobe(name) + data = '' + quote=0 + # first remove any spaces inside quotes, and the quotes + for c in dataraw: + if c == '"': + quote = (quote + 1) % 2 + if quote and c == ' ': + data += '_' + elif c != '"': + data += c + fmt, args = self.kprobes[name]['format'], self.kprobes[name]['args'] + arglist = dict() + # now process the args + for arg in sorted(args): + arglist[arg] = '' + m = re.match('.* '+arg+'=(?P<arg>.*) ', data); + if m: + arglist[arg] = m.group('arg') + else: + m = re.match('.* '+arg+'=(?P<arg>.*)', data); + if m: + arglist[arg] = m.group('arg') + out = fmt.format(**arglist) + out = out.replace(' ', '_').replace('"', '') + return out + def kprobeText(self, kname, kprobe): + name = fmt = func = kname + args = dict() + if 'name' in kprobe: + name = kprobe['name'] + if 'format' in kprobe: + fmt = kprobe['format'] + if 'func' in kprobe: + func = kprobe['func'] + if self.archargs in kprobe: + args = kprobe[self.archargs] + if 'args' in kprobe: + args = kprobe['args'] + if re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', func): + doError('Kprobe "%s" has format info in the function name "%s"' % (name, func)) + for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', fmt): + if arg not in args: + doError('Kprobe "%s" is missing argument "%s"' % (name, arg)) + val = 'p:%s_cal %s' % (name, func) + for i in sorted(args): + val += ' %s=%s' % (i, args[i]) + val += '\nr:%s_ret %s $retval\n' % (name, func) + return val + def addKprobes(self, output=False): + if len(self.kprobes) < 1: + return + if output: + pprint(' kprobe functions in this kernel:') + # first test each kprobe + rejects = [] + # sort kprobes: trace, ub-dev, custom, dev + kpl = [[], [], [], []] + linesout = len(self.kprobes) + for name in sorted(self.kprobes): + res = self.colorText('YES', 32) + if not self.testKprobe(name, self.kprobes[name]): + res = self.colorText('NO') + rejects.append(name) + else: + if name in self.tracefuncs: + kpl[0].append(name) + elif name in self.dev_tracefuncs: + if 'ub' in self.dev_tracefuncs[name]: + kpl[1].append(name) + else: + kpl[3].append(name) + else: +
kpl[2].append(name) + if output: + pprint(' %s: %s' % (name, res)) + kplist = kpl[0] + kpl[1] + kpl[2] + kpl[3] + # remove all failed ones from the list + for name in rejects: + self.kprobes.pop(name) + # set the kprobes all at once + self.fsetVal('', 'kprobe_events') + kprobeevents = '' + for kp in kplist: + kprobeevents += self.kprobeText(kp, self.kprobes[kp]) + self.fsetVal(kprobeevents, 'kprobe_events') + if output: + check = self.fgetVal('kprobe_events') + linesack = (len(check.split('\n')) - 1) // 2 + pprint(' kprobe functions enabled: %d/%d' % (linesack, linesout)) + self.fsetVal('1', 'events/kprobes/enable') + def testKprobe(self, kname, kprobe): + self.fsetVal('0', 'events/kprobes/enable') + kprobeevents = self.kprobeText(kname, kprobe) + if not kprobeevents: + return False + try: + self.fsetVal(kprobeevents, 'kprobe_events') + check = self.fgetVal('kprobe_events') + except: + return False + linesout = len(kprobeevents.split('\n')) + linesack = len(check.split('\n')) + if linesack < linesout: + return False + return True + def setVal(self, val, file): + if not os.path.exists(file): + return False + try: + fp = open(file, 'wb', 0) + fp.write(val.encode()) + fp.flush() + fp.close() + except: + return False + return True + def fsetVal(self, val, path): + return self.setVal(val, self.tpath+path) + def getVal(self, file): + res = '' + if not os.path.exists(file): + return res + try: + fp = open(file, 'r') + res = fp.read() + fp.close() + except: + pass + return res + def fgetVal(self, path): + return self.getVal(self.tpath+path) + def cleanupFtrace(self): + if(self.usecallgraph or self.usetraceevents or self.usedevsrc): + self.fsetVal('0', 'events/kprobes/enable') + self.fsetVal('', 'kprobe_events') + self.fsetVal('1024', 'buffer_size_kb') + def setupAllKprobes(self): + for name in self.tracefuncs: + self.defaultKprobe(name, self.tracefuncs[name]) + for name in self.dev_tracefuncs: + self.defaultKprobe(name, self.dev_tracefuncs[name]) + def isCallgraphFunc(self, name): + if len(self.tracefuncs) < 1 and self.suspendmode == 'command': + return True + for i in self.tracefuncs: + if 'func' in self.tracefuncs[i]: + f = self.tracefuncs[i]['func'] + else: + f = i + if name == f: + return True + return False + def initFtrace(self, quiet=False): + if not quiet: + sysvals.printSystemInfo(False) + pprint('INITIALIZING FTRACE...') + # turn trace off + self.fsetVal('0', 'tracing_on') + self.cleanupFtrace() + self.testVal(self.pmdpath, 'basic', '1') + # set the trace clock to global + self.fsetVal('global', 'trace_clock') + self.fsetVal('nop', 'current_tracer') + # set trace buffer to an appropriate value + cpus = max(1, self.cpucount) + if self.bufsize > 0: + tgtsize = self.bufsize + elif self.usecallgraph or self.usedevsrc: + bmax = (1*1024*1024) if self.suspendmode in ['disk', 'command'] \ + else (3*1024*1024) + tgtsize = min(self.memfree, bmax) + else: + tgtsize = 65536 + while not self.fsetVal('%d' % (tgtsize // cpus), 'buffer_size_kb'): + # if the size failed to set, lower it and keep trying + tgtsize -= 65536 + if tgtsize < 65536: + tgtsize = int(self.fgetVal('buffer_size_kb')) * cpus + break + self.vprint('Setting trace buffers to %d kB (%d kB per cpu)' % (tgtsize, tgtsize/cpus)) + # initialize the callgraph trace + if(self.usecallgraph): + # set trace type + self.fsetVal('function_graph', 'current_tracer') + self.fsetVal('', 'set_ftrace_filter') + # set trace format options + self.fsetVal('print-parent', 'trace_options') + self.fsetVal('funcgraph-abstime', 'trace_options') + 
self.fsetVal('funcgraph-cpu', 'trace_options') + self.fsetVal('funcgraph-duration', 'trace_options') + self.fsetVal('funcgraph-proc', 'trace_options') + self.fsetVal('funcgraph-tail', 'trace_options') + self.fsetVal('nofuncgraph-overhead', 'trace_options') + self.fsetVal('context-info', 'trace_options') + self.fsetVal('graph-time', 'trace_options') + self.fsetVal('%d' % self.max_graph_depth, 'max_graph_depth') + cf = ['dpm_run_callback'] + if(self.usetraceevents): + cf += ['dpm_prepare', 'dpm_complete'] + for fn in self.tracefuncs: + if 'func' in self.tracefuncs[fn]: + cf.append(self.tracefuncs[fn]['func']) + else: + cf.append(fn) + if self.ftop: + self.setFtraceFilterFunctions([self.ftopfunc]) + else: + self.setFtraceFilterFunctions(cf) + # initialize the kprobe trace + elif self.usekprobes: + for name in self.tracefuncs: + self.defaultKprobe(name, self.tracefuncs[name]) + if self.usedevsrc: + for name in self.dev_tracefuncs: + self.defaultKprobe(name, self.dev_tracefuncs[name]) + if not quiet: + pprint('INITIALIZING KPROBES...') + self.addKprobes(self.verbose) + if(self.usetraceevents): + # turn trace events on + events = iter(self.traceevents) + for e in events: + self.fsetVal('1', 'events/power/'+e+'/enable') + # clear the trace buffer + self.fsetVal('', 'trace') + def verifyFtrace(self): + # files needed for any trace data + files = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock', + 'trace_marker', 'trace_options', 'tracing_on'] + # files needed for callgraph trace data + tp = self.tpath + if(self.usecallgraph): + files += [ + 'available_filter_functions', + 'set_ftrace_filter', + 'set_graph_function' + ] + for f in files: + if(os.path.exists(tp+f) == False): + return False + return True + def verifyKprobes(self): + # files needed for kprobes to work + files = ['kprobe_events', 'events'] + tp = self.tpath + for f in files: + if(os.path.exists(tp+f) == False): + return False + return True + def colorText(self, str, color=31): + if not self.ansi: + return str + return '\x1B[%d;40m%s\x1B[m' % (color, str) + def writeDatafileHeader(self, filename, testdata): + fp = self.openlog(filename, 'w') + fp.write('%s\n%s\n# command | %s\n' % (self.teststamp, self.sysstamp, self.cmdline)) + for test in testdata: + if 'fw' in test: + fw = test['fw'] + if(fw): + fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1])) + if 'turbo' in test: + fp.write('# turbostat %s\n' % test['turbo']) + if 'wifi' in test: + fp.write('# wifi %s\n' % test['wifi']) + if test['error'] or len(testdata) > 1: + fp.write('# enter_sleep_error %s\n' % test['error']) + return fp + def sudoUserchown(self, dir): + if os.path.exists(dir) and self.sudouser: + cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1' + call(cmd.format(self.sudouser, dir), shell=True) + def outputResult(self, testdata, num=0): + if not self.result: + return + n = '' + if num > 0: + n = '%d' % num + fp = open(self.result, 'a') + if 'error' in testdata: + fp.write('result%s: fail\n' % n) + fp.write('error%s: %s\n' % (n, testdata['error'])) + else: + fp.write('result%s: pass\n' % n) + for v in ['suspend', 'resume', 'boot', 'lastinit']: + if v in testdata: + fp.write('%s%s: %.3f\n' % (v, n, testdata[v])) + for v in ['fwsuspend', 'fwresume']: + if v in testdata: + fp.write('%s%s: %.3f\n' % (v, n, testdata[v] / 1000000.0)) + if 'bugurl' in testdata: + fp.write('url%s: %s\n' % (n, testdata['bugurl'])) + fp.close() + self.sudoUserchown(self.result) + def configFile(self, file): + dir = os.path.dirname(os.path.realpath(__file__)) + if os.path.exists(file): + 
return file + elif os.path.exists(dir+'/'+file): + return dir+'/'+file + elif os.path.exists(dir+'/config/'+file): + return dir+'/config/'+file + return '' + def openlog(self, filename, mode): + isgz = self.gzip + if mode == 'r': + try: + with gzip.open(filename, mode+'t') as fp: + test = fp.read(64) + isgz = True + except: + isgz = False + if isgz: + return gzip.open(filename, mode+'t') + return open(filename, mode) + def putlog(self, filename, text): + with self.openlog(filename, 'a') as fp: + fp.write(text) + fp.close() + def dlog(self, text): + self.putlog(self.dmesgfile, '# %s\n' % text) + def flog(self, text): + self.putlog(self.ftracefile, text) + def b64unzip(self, data): + try: + out = codecs.decode(base64.b64decode(data), 'zlib').decode() + except: + out = data + return out + def b64zip(self, data): + out = base64.b64encode(codecs.encode(data.encode(), 'zlib')).decode() + return out + def platforminfo(self, cmdafter): + # add platform info on to a completed ftrace file + if not os.path.exists(self.ftracefile): + return False + footer = '#\n' + + # add test command string line if need be + if self.suspendmode == 'command' and self.testcommand: + footer += '# platform-testcmd: %s\n' % (self.testcommand) + + # get a list of target devices from the ftrace file + props = dict() + tp = TestProps() + tf = self.openlog(self.ftracefile, 'r') + for line in tf: + if tp.stampInfo(line, self): + continue + # parse only valid lines, if this is not one move on + m = re.match(tp.ftrace_line_fmt, line) + if(not m or 'device_pm_callback_start' not in line): + continue + m = re.match('.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg')); + if(not m): + continue + dev = m.group('d') + if dev not in props: + props[dev] = DevProps() + tf.close() + + # now get the syspath for each target device + for dirname, dirnames, filenames in os.walk('/sys/devices'): + if(re.match('.*/power', dirname) and 'async' in filenames): + dev = dirname.split('/')[-2] + if dev in props and (not props[dev].syspath or len(dirname) < len(props[dev].syspath)): + props[dev].syspath = dirname[:-6] + + # now fill in the properties for our target devices + for dev in sorted(props): + dirname = props[dev].syspath + if not dirname or not os.path.exists(dirname): + continue + with open(dirname+'/power/async') as fp: + text = fp.read() + props[dev].isasync = False + if 'enabled' in text: + props[dev].isasync = True + fields = os.listdir(dirname) + if 'product' in fields: + with open(dirname+'/product', 'rb') as fp: + props[dev].altname = ascii(fp.read()) + elif 'name' in fields: + with open(dirname+'/name', 'rb') as fp: + props[dev].altname = ascii(fp.read()) + elif 'model' in fields: + with open(dirname+'/model', 'rb') as fp: + props[dev].altname = ascii(fp.read()) + elif 'description' in fields: + with open(dirname+'/description', 'rb') as fp: + props[dev].altname = ascii(fp.read()) + elif 'id' in fields: + with open(dirname+'/id', 'rb') as fp: + props[dev].altname = ascii(fp.read()) + elif 'idVendor' in fields and 'idProduct' in fields: + idv, idp = '', '' + with open(dirname+'/idVendor', 'rb') as fp: + idv = ascii(fp.read()).strip() + with open(dirname+'/idProduct', 'rb') as fp: + idp = ascii(fp.read()).strip() + props[dev].altname = '%s:%s' % (idv, idp) + if props[dev].altname: + out = props[dev].altname.strip().replace('\n', ' ')\ + .replace(',', ' ').replace(';', ' ') + props[dev].altname = out + + # add a devinfo line to the bottom of ftrace + out = '' + for dev in sorted(props): + out += props[dev].out(dev) + footer += '# platform-devinfo: %s\n' % self.b64zip(out) + + # add a line for each of these commands with their outputs + for name, cmdline, info in cmdafter: + footer += '# platform-%s: %s | %s\n' % (name, cmdline, self.b64zip(info)) + self.flog(footer) + return True + def commonPrefix(self, list): + if len(list) < 2: + return '' + prefix = list[0] + for s in list[1:]: + while s[:len(prefix)] != prefix and prefix: + prefix = prefix[:len(prefix)-1] + if not prefix: + break + if '/' in prefix and prefix[-1] != '/': + prefix = prefix[0:prefix.rfind('/')+1] + return prefix + def dictify(self, text, format): + out = dict() + header = True if format == 1 else False + delim = ' ' if format == 1 else ':' + for line in text.split('\n'): + if header: + header, out['@'] = False, line + continue + line = line.strip() + if delim in line: + data = line.split(delim, 1) + num = re.search(r'[\d]+', data[1]) + if format == 2 and num: + out[data[0].strip()] = num.group() + else: + out[data[0].strip()] = data[1] + return out + def cmdinfo(self, begin, debug=False): + out = [] + if begin: + self.cmd1 = dict() + for cargs in self.infocmds: + delta, name = cargs[0], cargs[1] + cmdline, cmdpath = ' '.join(cargs[2:]), self.getExec(cargs[2]) + if not cmdpath or (begin and not delta): + continue + self.dlog('[%s]' % cmdline) + try: + fp = Popen([cmdpath]+cargs[3:], stdout=PIPE, stderr=PIPE).stdout + info = ascii(fp.read()).strip() + fp.close() + except: + continue + if not debug and begin: + self.cmd1[name] = self.dictify(info, delta) + elif not debug and delta and name in self.cmd1: + before, after = self.cmd1[name], self.dictify(info, delta) +
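+# (Sketch of the comparison below, with made-up values: dictify() maps
+# command output to {key: value} pairs -- format 1 splits on whitespace
+# and stores the header line under '@', format 2 keeps only the first
+# number after ':' -- so a before/after pair {'gpe00': '10'} vs
+# {'gpe00': '13'} is rendered as 'gpe00 : 10 -> 13' by the delta==2
+# branch.)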
dinfo = ('\t%s\n' % before['@']) if '@' in before else '' + prefix = self.commonPrefix(list(before.keys())) + for key in sorted(before): + if key in after and before[key] != after[key]: + title = key.replace(prefix, '') + if delta == 2: + dinfo += '\t%s : %s -> %s\n' % \ + (title, before[key].strip(), after[key].strip()) + else: + dinfo += '%10s (start) : %s\n%10s (after) : %s\n' % \ + (title, before[key], title, after[key]) + dinfo = '\tnothing changed' if not dinfo else dinfo.rstrip() + out.append((name, cmdline, dinfo)) + else: + out.append((name, cmdline, '\tnothing' if not info else info)) + return out + def testVal(self, file, fmt='basic', value=''): + if file == 'restoreall': + for f in self.cfgdef: + if os.path.exists(f): + fp = open(f, 'w') + fp.write(self.cfgdef[f]) + fp.close() + self.cfgdef = dict() + elif value and os.path.exists(file): + fp = open(file, 'r+') + if fmt == 'radio': + m = re.match('.*\[(?P<v>.*)\].*', fp.read()) + if m: + self.cfgdef[file] = m.group('v') + elif fmt == 'acpi': + line = fp.read().strip().split('\n')[-1] + m = re.match('.* (?P<v>[0-9A-Fx]*) .*', line) + if m: + self.cfgdef[file] = m.group('v') + else: + self.cfgdef[file] = fp.read().strip() + fp.write(value) + fp.close() + def haveTurbostat(self): + if not self.tstat: + return False + cmd = self.getExec('turbostat') + if not cmd: + return False + fp = Popen([cmd, '-v'], stdout=PIPE, stderr=PIPE).stderr + out = ascii(fp.read()).strip() + fp.close() + if re.match('turbostat version .*', out): + self.vprint(out) + return True + return False + def turbostat(self): + cmd = self.getExec('turbostat') + rawout = keyline = valline = '' + fullcmd = '%s -q -S echo freeze > %s' % (cmd, self.powerfile) + fp = Popen(['sh', '-c', fullcmd], stdout=PIPE, stderr=PIPE).stderr + for line in fp: + line = ascii(line) + rawout += line + if keyline and valline: + continue + if re.match('(?i)Avg_MHz.*', line): + keyline = line.strip().split() + elif keyline: + valline = line.strip().split() + fp.close() + if not keyline or not valline or len(keyline) != len(valline): + errmsg = 'unrecognized turbostat output:\n'+rawout.strip() + self.vprint(errmsg) + if not self.verbose: + pprint(errmsg) + return '' + if self.verbose: + pprint(rawout.strip()) + out = [] + for key in keyline: + idx = keyline.index(key) + val = valline[idx] + out.append('%s=%s' % (key, val)) + return '|'.join(out) + def wifiDetails(self, dev): + try: + info = open('/sys/class/net/%s/device/uevent' % dev, 'r').read().strip() + except: + return dev + vals = [dev] + for prop in info.split('\n'): + if prop.startswith('DRIVER=') or prop.startswith('PCI_ID='): + vals.append(prop.split('=')[-1]) + return ':'.join(vals) + def checkWifi(self, dev=''): + try: + w = open('/proc/net/wireless', 'r').read().strip() + except: + return '' + for line in reversed(w.split('\n')): + m = re.match(' *(?P<dev>.*): (?P<stat>[0-9a-f]*) .*', line) + if not m or (dev and dev != m.group('dev')): + continue + return m.group('dev') + return '' + def pollWifi(self, dev, timeout=60): + start = time.time() + while (time.time() - start) < timeout: + w = self.checkWifi(dev) + if w: + return '%s reconnected %.2f' % \ + (self.wifiDetails(dev), max(0, time.time() - start)) + time.sleep(0.01) + return '%s timeout %d' % (self.wifiDetails(dev), timeout) + def errorSummary(self, errinfo, msg): + found = False + for entry in errinfo: + if re.match(entry['match'], msg): + entry['count'] += 1 + if self.hostname not in entry['urls']: + entry['urls'][self.hostname] = [self.htmlfile] + elif self.htmlfile not
in entry['urls'][self.hostname]: + entry['urls'][self.hostname].append(self.htmlfile) + found = True + break + if found: + return + arr = msg.split() + for j in range(len(arr)): + if re.match('^[0-9,\-\.]*$', arr[j]): + arr[j] = '[0-9,\-\.]*' + else: + arr[j] = arr[j]\ + .replace('\\', '\\\\').replace(']', '\]').replace('[', '\[')\ + .replace('.', '\.').replace('+', '\+').replace('*', '\*')\ + .replace('(', '\(').replace(')', '\)').replace('}', '\}')\ + .replace('{', '\{') + mstr = ' *'.join(arr) + entry = { + 'line': msg, + 'match': mstr, + 'count': 1, + 'urls': {self.hostname: [self.htmlfile]} + } + errinfo.append(entry) + def multistat(self, start, idx, finish): + if 'time' in self.multitest: + id = '%d Duration=%dmin' % (idx+1, self.multitest['time']) + else: + id = '%d/%d' % (idx+1, self.multitest['count']) + t = time.time() + if 'start' not in self.multitest: + self.multitest['start'] = self.multitest['last'] = t + self.multitest['total'] = 0.0 + pprint('TEST (%s) START' % id) + return + dt = t - self.multitest['last'] + if not start: + if idx == 0 and self.multitest['delay'] > 0: + self.multitest['total'] += self.multitest['delay'] + pprint('TEST (%s) COMPLETE -- Duration %.1fs' % (id, dt)) + return + self.multitest['total'] += dt + self.multitest['last'] = t + avg = self.multitest['total'] / idx + if 'time' in self.multitest: + left = finish - datetime.now() + left -= timedelta(microseconds=left.microseconds) + else: + left = timedelta(seconds=((self.multitest['count'] - idx) * int(avg))) + pprint('TEST (%s) START - Avg Duration %.1fs, Time left %s' % \ + (id, avg, str(left))) + def multiinit(self, c, d): + sz, unit = 'count', 'm' + if c.endswith('d') or c.endswith('h') or c.endswith('m'): + sz, unit, c = 'time', c[-1], c[:-1] + self.multitest['run'] = True + self.multitest[sz] = getArgInt('multi: n d (exec count)', c, 1, 1000000, False) + self.multitest['delay'] = getArgInt('multi: n d (delay between tests)', d, 0, 3600, False) + if unit == 'd': + self.multitest[sz] *= 1440 + elif unit == 'h': + self.multitest[sz] *= 60 + def displayControl(self, cmd): + xset, ret = 'timeout 10 xset -d :0.0 {0}', 0 + if self.sudouser: + xset = 'sudo -u %s %s' % (self.sudouser, xset) + if cmd == 'init': + ret = call(xset.format('dpms 0 0 0'), shell=True) + if not ret: + ret = call(xset.format('s off'), shell=True) + elif cmd == 'reset': + ret = call(xset.format('s reset'), shell=True) + elif cmd in ['on', 'off', 'standby', 'suspend']: + b4 = self.displayControl('stat') + ret = call(xset.format('dpms force %s' % cmd), shell=True) + if not ret: + curr = self.displayControl('stat') + self.vprint('Display Switched: %s -> %s' % (b4, curr)) + if curr != cmd: + self.vprint('WARNING: Display failed to change to %s' % cmd) + if ret: + self.vprint('WARNING: Display failed to change to %s with xset' % cmd) + return ret + elif cmd == 'stat': + fp = Popen(xset.format('q').split(' '), stdout=PIPE).stdout + ret = 'unknown' + for line in fp: + m = re.match('[\s]*Monitor is (?P<m>.*)', ascii(line)) + if(m and len(m.group('m')) >= 2): + out = m.group('m').lower() + ret = out[3:] if out[0:2] == 'in' else out + break + fp.close() + return ret + def setRuntimeSuspend(self, before=True): + if before: + # runtime suspend disable or enable + if self.rs > 0: + self.rstgt, self.rsval, self.rsdir = 'on', 'auto', 'enabled' + else: + self.rstgt, self.rsval, self.rsdir = 'auto', 'on', 'disabled' + pprint('CONFIGURING RUNTIME SUSPEND...') + self.rslist = deviceInfo(self.rstgt) + for i in self.rslist: + self.setVal(self.rsval, i) +
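+# (The rslist entries are assumed to be sysfs runtime-PM control files,
+# e.g. /sys/devices/.../power/control, as collected by deviceInfo, which
+# is defined elsewhere in this file; writing 'auto' allows runtime
+# suspend and 'on' blocks it, which is what the rstgt/rsval swap above
+# selects.)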
pprint('runtime suspend %s on all devices (%d changed)' % (self.rsdir, len(self.rslist))) + pprint('waiting 5 seconds...') + time.sleep(5) + else: + # runtime suspend re-enable or re-disable + for i in self.rslist: + self.setVal(self.rstgt, i) + pprint('runtime suspend settings restored on %d devices' % len(self.rslist)) + +sysvals = SystemValues() +switchvalues = ['enable', 'disable', 'on', 'off', 'true', 'false', '1', '0'] +switchoff = ['disable', 'off', 'false', '0'] +suspendmodename = { + 'freeze': 'Freeze (S0)', + 'standby': 'Standby (S1)', + 'mem': 'Suspend (S3)', + 'disk': 'Hibernate (S4)' +} + +# Class: DevProps +# Description: +# Simple class which holds property values collected +# for all the devices used in the timeline. +class DevProps: + def __init__(self): + self.syspath = '' + self.altname = '' + self.isasync = True + self.xtraclass = '' + self.xtrainfo = '' + def out(self, dev): + return '%s,%s,%d;' % (dev, self.altname, self.isasync) + def debug(self, dev): + pprint('%s:\n\taltname = %s\n\t async = %s' % (dev, self.altname, self.isasync)) + def altName(self, dev): + if not self.altname or self.altname == dev: + return dev + return '%s [%s]' % (self.altname, dev) + def xtraClass(self): + if self.xtraclass: + return ' '+self.xtraclass + if not self.isasync: + return ' sync' + return '' + def xtraInfo(self): + if self.xtraclass: + return ' '+self.xtraclass + if self.isasync: + return ' (async)' + return ' (sync)' + +# Class: DeviceNode +# Description: +# A container used to create a device hierarchy, with a single root node +# and a tree of child nodes. Used by Data.deviceTopology() +class DeviceNode: + def __init__(self, nodename, nodedepth): + self.name = nodename + self.children = [] + self.depth = nodedepth + +# Class: Data +# Description: +# The primary container for suspend/resume test data. There is one for +# each test run.
The data is organized into a chronological hierarchy: +# Data.dmesg { +# phases { +# 10 sequential, non-overlapping phases of S/R +# contents: times for phase start/end, order/color data for html +# devlist { +# device callback or action list for this phase +# device { +# a single device callback or generic action +# contents: start/stop times, pid/cpu/driver info +# parents/children, html id for timeline/callgraph +# optionally includes an ftrace callgraph +# optionally includes dev/ps data +# } +# } +# } +# } +# +class Data: + phasedef = { + 'suspend_prepare': {'order': 0, 'color': '#CCFFCC'}, + 'suspend': {'order': 1, 'color': '#88FF88'}, + 'suspend_late': {'order': 2, 'color': '#00AA00'}, + 'suspend_noirq': {'order': 3, 'color': '#008888'}, + 'suspend_machine': {'order': 4, 'color': '#0000FF'}, + 'resume_machine': {'order': 5, 'color': '#FF0000'}, + 'resume_noirq': {'order': 6, 'color': '#FF9900'}, + 'resume_early': {'order': 7, 'color': '#FFCC00'}, + 'resume': {'order': 8, 'color': '#FFFF88'}, + 'resume_complete': {'order': 9, 'color': '#FFFFCC'}, + } + errlist = { + 'HWERROR' : r'.*\[ *Hardware Error *\].*', + 'FWBUG' : r'.*\[ *Firmware Bug *\].*', + 'BUG' : r'(?i).*\bBUG\b.*', + 'ERROR' : r'(?i).*\bERROR\b.*', + 'WARNING' : r'(?i).*\bWARNING\b.*', + 'FAULT' : r'(?i).*\bFAULT\b.*', + 'FAIL' : r'(?i).*\bFAILED\b.*', + 'INVALID' : r'(?i).*\bINVALID\b.*', + 'CRASH' : r'(?i).*\bCRASHED\b.*', + 'TIMEOUT' : r'(?i).*\bTIMEOUT\b.*', + 'IRQ' : r'.*\bgenirq: .*', + 'TASKFAIL': r'.*Freezing of tasks *.*', + 'ACPI' : r'.*\bACPI *(?P<b>[A-Za-z]*) *Error[: ].*', + 'DISKFULL': r'.*\bNo space left on device.*', + 'USBERR' : r'.*usb .*device .*, error [0-9-]*', + 'ATAERR' : r' *ata[0-9\.]*: .*failed.*', + 'MEIERR' : r' *mei.*: .*failed.*', + 'TPMERR' : r'(?i) *tpm *tpm[0-9]*: .*error.*', + } + def __init__(self, num): + idchar = 'abcdefghij' + self.start = 0.0 # test start + self.end = 0.0 # test end + self.hwstart = 0 # rtc test start + self.hwend = 0 # rtc test end + self.tSuspended = 0.0 # low-level suspend start + self.tResumed = 0.0 # low-level resume start + self.tKernSus = 0.0 # kernel level suspend start + self.tKernRes = 0.0 # kernel level resume end + self.fwValid = False # is firmware data available + self.fwSuspend = 0 # time spent in firmware suspend + self.fwResume = 0 # time spent in firmware resume + self.html_device_id = 0 + self.stamp = 0 + self.outfile = '' + self.kerror = False + self.wifi = dict() + self.turbostat = 0 + self.enterfail = '' + self.currphase = '' + self.pstl = dict() # process timeline + self.testnumber = num + self.idstr = idchar[num] + self.dmesgtext = [] # dmesg text file in memory + self.dmesg = dict() # root data structure + self.errorinfo = {'suspend':[],'resume':[]} + self.tLow = [] # time spent in low-level suspends (standby/freeze) + self.devpids = [] + self.devicegroups = 0 + def sortedPhases(self): + return sorted(self.dmesg, key=lambda k:self.dmesg[k]['order']) + def initDevicegroups(self): + # called when phases are all finished being added + for phase in sorted(self.dmesg.keys()): + if '*' in phase: + p = phase.split('*') + pnew = '%s%d' % (p[0], len(p)) + self.dmesg[pnew] = self.dmesg.pop(phase) + self.devicegroups = [] + for phase in self.sortedPhases(): + self.devicegroups.append([phase]) + def nextPhase(self, phase, offset): + order = self.dmesg[phase]['order'] + offset + for p in self.dmesg: + if self.dmesg[p]['order'] == order: + return p + return '' + def lastPhase(self, depth=1): + plist = self.sortedPhases() + if len(plist) < depth: + return '' +
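+# (Example: with the ten default phases from phasedef, lastPhase()
+# returns 'resume_complete' and lastPhase(2) returns 'resume', since
+# sortedPhases() orders the dmesg keys by their 'order' field.)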
return plist[-1*depth] + def turbostatInfo(self): + tp = TestProps() + out = {'syslpi':'N/A','pkgpc10':'N/A'} + for line in self.dmesgtext: + m = re.match(tp.tstatfmt, line) + if not m: + continue + for i in m.group('t').split('|'): + if 'SYS%LPI' in i: + out['syslpi'] = i.split('=')[-1]+'%' + elif 'pc10' in i: + out['pkgpc10'] = i.split('=')[-1]+'%' + break + return out + def extractErrorInfo(self): + lf = self.dmesgtext + if len(self.dmesgtext) < 1 and sysvals.dmesgfile: + lf = sysvals.openlog(sysvals.dmesgfile, 'r') + i = 0 + tp = TestProps() + list = [] + for line in lf: + i += 1 + if tp.stampInfo(line, sysvals): + continue + m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line) + if not m: + continue + t = float(m.group('ktime')) + if t < self.start or t > self.end: + continue + dir = 'suspend' if t < self.tSuspended else 'resume' + msg = m.group('msg') + if re.match('capability: warning: .*', msg): + continue + for err in self.errlist: + if re.match(self.errlist[err], msg): + list.append((msg, err, dir, t, i, i)) + self.kerror = True + break + tp.msglist = [] + for msg, type, dir, t, idx1, idx2 in list: + tp.msglist.append(msg) + self.errorinfo[dir].append((type, t, idx1, idx2)) + if self.kerror: + sysvals.dmesglog = True + if len(self.dmesgtext) < 1 and sysvals.dmesgfile: + lf.close() + return tp + def setStart(self, time, msg=''): + self.start = time + if msg: + try: + self.hwstart = datetime.strptime(msg, sysvals.tmstart) + except: + self.hwstart = 0 + def setEnd(self, time, msg=''): + self.end = time + if msg: + try: + self.hwend = datetime.strptime(msg, sysvals.tmend) + except: + self.hwend = 0 + def isTraceEventOutsideDeviceCalls(self, pid, time): + for phase in self.sortedPhases(): + list = self.dmesg[phase]['list'] + for dev in list: + d = list[dev] + if(d['pid'] == pid and time >= d['start'] and + time < d['end']): + return False + return True + def sourcePhase(self, start): + for phase in self.sortedPhases(): + if 'machine' in phase: + continue + pend = self.dmesg[phase]['end'] + if start <= pend: + return phase + return 'resume_complete' + def sourceDevice(self, phaselist, start, end, pid, type): + tgtdev = '' + for phase in phaselist: + list = self.dmesg[phase]['list'] + for devname in list: + dev = list[devname] + # pid must match + if dev['pid'] != pid: + continue + devS = dev['start'] + devE = dev['end'] + if type == 'device': + # device target event is entirely inside the source boundary + if(start < devS or start >= devE or end <= devS or end > devE): + continue + elif type == 'thread': + # thread target event will expand the source boundary + if start < devS: + dev['start'] = start + if end > devE: + dev['end'] = end + tgtdev = dev + break + return tgtdev + def addDeviceFunctionCall(self, displayname, kprobename, proc, pid, start, end, cdata, rdata): + # try to place the call in a device + phases = self.sortedPhases() + tgtdev = self.sourceDevice(phases, start, end, pid, 'device') + # calls with device pids that occur outside device bounds are dropped + # TODO: include these somehow + if not tgtdev and pid in self.devpids: + return False + # try to place the call in a thread + if not tgtdev: + tgtdev = self.sourceDevice(phases, start, end, pid, 'thread') + # create new thread blocks, expand as new calls are found + if not tgtdev: + if proc == '<...>': + threadname = 'kthread-%d' % (pid) + else: + threadname = '%s-%d' % (proc, pid) + tgtphase = self.sourcePhase(start) + self.newAction(tgtphase, threadname, pid, '', start, end, '', ' kth', '') + return
self.addDeviceFunctionCall(displayname, kprobename, proc, pid, start, end, cdata, rdata) + # this should not happen + if not tgtdev: + sysvals.vprint('[%f - %f] %s-%d %s %s %s' % \ + (start, end, proc, pid, kprobename, cdata, rdata)) + return False + # place the call data inside the src element of the tgtdev + if('src' not in tgtdev): + tgtdev['src'] = [] + dtf = sysvals.dev_tracefuncs + ubiquitous = False + if kprobename in dtf and 'ub' in dtf[kprobename]: + ubiquitous = True + title = cdata+' '+rdata + mstr = '\(.*\) *(?P<caller>.*) *\((?P<args>.*)\+.* arg1=(?P<ret>.*)' + m = re.match(mstr, title) + if m: + c = m.group('caller') + a = m.group('args').strip() + r = m.group('ret') + if len(r) > 6: + r = '' + else: + r = 'ret=%s ' % r + if ubiquitous and c in dtf and 'ub' in dtf[c]: + return False + color = sysvals.kprobeColor(kprobename) + e = DevFunction(displayname, a, c, r, start, end, ubiquitous, proc, pid, color) + tgtdev['src'].append(e) + return True + def overflowDevices(self): + # get a list of devices that extend beyond the end of this test run + devlist = [] + for phase in self.sortedPhases(): + list = self.dmesg[phase]['list'] + for devname in list: + dev = list[devname] + if dev['end'] > self.end: + devlist.append(dev) + return devlist + def mergeOverlapDevices(self, devlist): + # merge any devices that overlap devlist + for dev in devlist: + devname = dev['name'] + for phase in self.sortedPhases(): + list = self.dmesg[phase]['list'] + if devname not in list: + continue + tdev = list[devname] + o = min(dev['end'], tdev['end']) - max(dev['start'], tdev['start']) + if o <= 0: + continue + dev['end'] = tdev['end'] + if 'src' not in dev or 'src' not in tdev: + continue + dev['src'] += tdev['src'] + del list[devname] + def usurpTouchingThread(self, name, dev): + # the caller test has priority of this thread, give it to the caller + for phase in self.sortedPhases(): + list = self.dmesg[phase]['list'] + if name in list: + tdev = list[name] + if tdev['start'] - dev['end'] < 0.1: + dev['end'] = tdev['end'] + if 'src' not in dev: + dev['src'] = [] + if 'src' in tdev: + dev['src'] += tdev['src'] + del list[name] + break + def stitchTouchingThreads(self, testlist): + # merge any threads between tests that touch + for phase in self.sortedPhases(): + list = self.dmesg[phase]['list'] + for devname in list: + dev = list[devname] + if 'htmlclass' not in dev or 'kth' not in dev['htmlclass']: + continue + for data in testlist: + data.usurpTouchingThread(devname, dev) + def optimizeDevSrc(self): + # merge any src call loops to reduce timeline size + for phase in self.sortedPhases(): + list = self.dmesg[phase]['list'] + for dev in list: + if 'src' not in list[dev]: + continue + src = list[dev]['src'] + p = 0 + for e in sorted(src, key=lambda event: event.time): + if not p or not e.repeat(p): + p = e + continue + # e is another iteration of p, move it into p + p.end = e.end + p.length = p.end - p.time + p.count += 1 + src.remove(e) + def trimTimeVal(self, t, t0, dT, left): + if left: + if(t > t0): + if(t - dT < t0): + return t0 + return t - dT + else: + return t + else: + if(t < t0 + dT): + if(t > t0): + return t0 + dT + return t + dT + else: + return t + def trimTime(self, t0, dT, left): + self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left) + self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left) + self.start = self.trimTimeVal(self.start, t0, dT, left) + self.tKernSus = self.trimTimeVal(self.tKernSus, t0, dT, left) + self.tKernRes = self.trimTimeVal(self.tKernRes, t0, dT, left) + self.end =
self.trimTimeVal(self.end, t0, dT, left) + for phase in self.sortedPhases(): + p = self.dmesg[phase] + p['start'] = self.trimTimeVal(p['start'], t0, dT, left) + p['end'] = self.trimTimeVal(p['end'], t0, dT, left) + list = p['list'] + for name in list: + d = list[name] + d['start'] = self.trimTimeVal(d['start'], t0, dT, left) + d['end'] = self.trimTimeVal(d['end'], t0, dT, left) + d['length'] = d['end'] - d['start'] + if('ftrace' in d): + cg = d['ftrace'] + cg.start = self.trimTimeVal(cg.start, t0, dT, left) + cg.end = self.trimTimeVal(cg.end, t0, dT, left) + for line in cg.list: + line.time = self.trimTimeVal(line.time, t0, dT, left) + if('src' in d): + for e in d['src']: + e.time = self.trimTimeVal(e.time, t0, dT, left) + e.end = self.trimTimeVal(e.end, t0, dT, left) + e.length = e.end - e.time + for dir in ['suspend', 'resume']: + list = [] + for e in self.errorinfo[dir]: + type, tm, idx1, idx2 = e + tm = self.trimTimeVal(tm, t0, dT, left) + list.append((type, tm, idx1, idx2)) + self.errorinfo[dir] = list + def trimFreezeTime(self, tZero): + # trim out any standby or freeze clock time + lp = '' + for phase in self.sortedPhases(): + if 'resume_machine' in phase and 'suspend_machine' in lp: + tS, tR = self.dmesg[lp]['end'], self.dmesg[phase]['start'] + tL = tR - tS + if tL <= 0: + continue + left = True if tR > tZero else False + self.trimTime(tS, tL, left) + if 'waking' in self.dmesg[lp]: + tCnt = self.dmesg[lp]['waking'][0] + if self.dmesg[lp]['waking'][1] >= 0.001: + tTry = '-%.0f' % (round(self.dmesg[lp]['waking'][1] * 1000)) + else: + tTry = '-%.3f' % (self.dmesg[lp]['waking'][1] * 1000) + text = '%.0f (%s ms waking %d times)' % (tL * 1000, tTry, tCnt) + else: + text = '%.0f' % (tL * 1000) + self.tLow.append(text) + lp = phase + def getMemTime(self): + if not self.hwstart or not self.hwend: + return + stime = (self.tSuspended - self.start) * 1000000 + rtime = (self.end - self.tResumed) * 1000000 + hws = self.hwstart + timedelta(microseconds=stime) + hwr = self.hwend - timedelta(microseconds=rtime) + self.tLow.append('%.0f'%((hwr - hws).total_seconds() * 1000)) + def getTimeValues(self): + sktime = (self.tSuspended - self.tKernSus) * 1000 + rktime = (self.tKernRes - self.tResumed) * 1000 + return (sktime, rktime) + def setPhase(self, phase, ktime, isbegin, order=-1): + if(isbegin): + # phase start over current phase + if self.currphase: + if 'resume_machine' not in self.currphase: + sysvals.vprint('WARNING: phase %s failed to end' % self.currphase) + self.dmesg[self.currphase]['end'] = ktime + phases = self.dmesg.keys() + color = self.phasedef[phase]['color'] + count = len(phases) if order < 0 else order + # create unique name for every new phase + while phase in phases: + phase += '*' + self.dmesg[phase] = {'list': dict(), 'start': -1.0, 'end': -1.0, + 'row': 0, 'color': color, 'order': count} + self.dmesg[phase]['start'] = ktime + self.currphase = phase + else: + # phase end without a start + if phase not in self.currphase: + if self.currphase: + sysvals.vprint('WARNING: %s ended instead of %s, ftrace corruption?' % (phase, self.currphase)) + else: + sysvals.vprint('WARNING: %s ended without a start, ftrace corruption?' 
% phase) + return phase + phase = self.currphase + self.dmesg[phase]['end'] = ktime + self.currphase = '' + return phase + def sortedDevices(self, phase): + list = self.dmesg[phase]['list'] + return sorted(list, key=lambda k:list[k]['start']) + def fixupInitcalls(self, phase): + # if any calls never returned, clip them at system resume end + phaselist = self.dmesg[phase]['list'] + for devname in phaselist: + dev = phaselist[devname] + if(dev['end'] < 0): + for p in self.sortedPhases(): + if self.dmesg[p]['end'] > dev['start']: + dev['end'] = self.dmesg[p]['end'] + break + sysvals.vprint('%s (%s): callback didnt return' % (devname, phase)) + def deviceFilter(self, devicefilter): + for phase in self.sortedPhases(): + list = self.dmesg[phase]['list'] + rmlist = [] + for name in list: + keep = False + for filter in devicefilter: + if filter in name or \ + ('drv' in list[name] and filter in list[name]['drv']): + keep = True + if not keep: + rmlist.append(name) + for name in rmlist: + del list[name] + def fixupInitcallsThatDidntReturn(self): + # if any calls never returned, clip them at system resume end + for phase in self.sortedPhases(): + self.fixupInitcalls(phase) + def phaseOverlap(self, phases): + rmgroups = [] + newgroup = [] + for group in self.devicegroups: + for phase in phases: + if phase not in group: + continue + for p in group: + if p not in newgroup: + newgroup.append(p) + if group not in rmgroups: + rmgroups.append(group) + for group in rmgroups: + self.devicegroups.remove(group) + self.devicegroups.append(newgroup) + def newActionGlobal(self, name, start, end, pid=-1, color=''): + # which phase is this device callback or action in + phases = self.sortedPhases() + targetphase = 'none' + htmlclass = '' + overlap = 0.0 + myphases = [] + for phase in phases: + pstart = self.dmesg[phase]['start'] + pend = self.dmesg[phase]['end'] + # see if the action overlaps this phase + o = max(0, min(end, pend) - max(start, pstart)) + if o > 0: + myphases.append(phase) + # set the target phase to the one that overlaps most + if o > overlap: + if overlap > 0 and phase == 'post_resume': + continue + targetphase = phase + overlap = o + # if no target phase was found, pin it to the edge + if targetphase == 'none': + p0start = self.dmesg[phases[0]]['start'] + if start <= p0start: + targetphase = phases[0] + else: + targetphase = phases[-1] + if pid == -2: + htmlclass = ' bg' + elif pid == -3: + htmlclass = ' ps' + if len(myphases) > 1: + htmlclass = ' bg' + self.phaseOverlap(myphases) + if targetphase in phases: + newname = self.newAction(targetphase, name, pid, '', start, end, '', htmlclass, color) + return (targetphase, newname) + return False + def newAction(self, phase, name, pid, parent, start, end, drv, htmlclass='', color=''): + # new device callback for a specific phase + self.html_device_id += 1 + devid = '%s%d' % (self.idstr, self.html_device_id) + list = self.dmesg[phase]['list'] + length = -1.0 + if(start >= 0 and end >= 0): + length = end - start + if pid == -2 or name not in sysvals.tracefuncs.keys(): + i = 2 + origname = name + while(name in list): + name = '%s[%d]' % (origname, i) + i += 1 + list[name] = {'name': name, 'start': start, 'end': end, 'pid': pid, + 'par': parent, 'length': length, 'row': 0, 'id': devid, 'drv': drv } + if htmlclass: + list[name]['htmlclass'] = htmlclass + if color: + list[name]['color'] = color + return name + def findDevice(self, phase, name): + list = self.dmesg[phase]['list'] + mydev = '' + for devname in sorted(list): + if name == devname or 
re.match('^%s\[(?P<num>[0-9]*)\]$' % name, devname): + mydev = devname + if mydev: + return list[mydev] + return False
+ def deviceChildren(self, devname, phase): + devlist = [] + list = self.dmesg[phase]['list'] + for child in list: + if(list[child]['par'] == devname): + devlist.append(child) + return devlist
+ def maxDeviceNameSize(self, phase): + size = 0 + for name in self.dmesg[phase]['list']: + if len(name) > size: + size = len(name) + return size
+ def printDetails(self): + sysvals.vprint('Timeline Details:') + sysvals.vprint(' test start: %f' % self.start) + sysvals.vprint('kernel suspend start: %f' % self.tKernSus) + tS = tR = False + for phase in self.sortedPhases(): + devlist = self.dmesg[phase]['list'] + dc, ps, pe = len(devlist), self.dmesg[phase]['start'], self.dmesg[phase]['end'] + if not tS and ps >= self.tSuspended: + sysvals.vprint(' machine suspended: %f' % self.tSuspended) + tS = True + if not tR and ps >= self.tResumed: + sysvals.vprint(' machine resumed: %f' % self.tResumed) + tR = True + sysvals.vprint('%20s: %f - %f (%d devices)' % (phase, ps, pe, dc)) + if sysvals.devdump: + sysvals.vprint(''.join('-' for i in range(80))) + maxname = '%d' % self.maxDeviceNameSize(phase) + fmt = '%3d) %'+maxname+'s - %f - %f' + c = 1 + for name in sorted(devlist): + s = devlist[name]['start'] + e = devlist[name]['end'] + sysvals.vprint(fmt % (c, name, s, e)) + c += 1 + sysvals.vprint(''.join('-' for i in range(80))) + sysvals.vprint(' kernel resume end: %f' % self.tKernRes) + sysvals.vprint(' test end: %f' % self.end)
+ def deviceChildrenAllPhases(self, devname): + devlist = [] + for phase in self.sortedPhases(): + list = self.deviceChildren(devname, phase) + for dev in sorted(list): + if dev not in devlist: + devlist.append(dev) + return devlist
+ def masterTopology(self, name, list, depth): + node = DeviceNode(name, depth) + for cname in list: + # avoid recursions + if name == cname: + continue + clist = self.deviceChildrenAllPhases(cname) + cnode = self.masterTopology(cname, clist, depth+1) + node.children.append(cnode) + return node
+ def printTopology(self, node): + html = '' + if node.name: + info = '' + drv = '' + for phase in self.sortedPhases(): + list = self.dmesg[phase]['list'] + if node.name in list: + s = list[node.name]['start'] + e = list[node.name]['end'] + if list[node.name]['drv']: + drv = ' {'+list[node.name]['drv']+'}' + info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000)) + html += '<li><b>'+node.name+drv+'</b>' + if info: + html += '<ul>'+info+'</ul>' + html += '</li>' + if len(node.children) > 0: + html += '<ul>' + for cnode in node.children: + html += self.printTopology(cnode) + html += '</ul>
    ' + return html + def rootDeviceList(self): + # list of devices graphed + real = [] + for phase in self.sortedPhases(): + list = self.dmesg[phase]['list'] + for dev in sorted(list): + if list[dev]['pid'] >= 0 and dev not in real: + real.append(dev) + # list of top-most root devices + rootlist = [] + for phase in self.sortedPhases(): + list = self.dmesg[phase]['list'] + for dev in sorted(list): + pdev = list[dev]['par'] + pid = list[dev]['pid'] + if(pid < 0 or re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)): + continue + if pdev and pdev not in real and pdev not in rootlist: + rootlist.append(pdev) + return rootlist + def deviceTopology(self): + rootlist = self.rootDeviceList() + master = self.masterTopology('', rootlist, 0) + return self.printTopology(master) + def selectTimelineDevices(self, widfmt, tTotal, mindevlen): + # only select devices that will actually show up in html + self.tdevlist = dict() + for phase in self.dmesg: + devlist = [] + list = self.dmesg[phase]['list'] + for dev in list: + length = (list[dev]['end'] - list[dev]['start']) * 1000 + width = widfmt % (((list[dev]['end']-list[dev]['start'])*100)/tTotal) + if length >= mindevlen: + devlist.append(dev) + self.tdevlist[phase] = devlist + def addHorizontalDivider(self, devname, devend): + phase = 'suspend_prepare' + self.newAction(phase, devname, -2, '', \ + self.start, devend, '', ' sec', '') + if phase not in self.tdevlist: + self.tdevlist[phase] = [] + self.tdevlist[phase].append(devname) + d = DevItem(0, phase, self.dmesg[phase]['list'][devname]) + return d + def addProcessUsageEvent(self, name, times): + # get the start and end times for this process + maxC = 0 + tlast = 0 + start = -1 + end = -1 + for t in sorted(times): + if tlast == 0: + tlast = t + continue + if name in self.pstl[t]: + if start == -1 or tlast < start: + start = tlast + if end == -1 or t > end: + end = t + tlast = t + if start == -1 or end == -1: + return 0 + # add a new action for this process and get the object + out = self.newActionGlobal(name, start, end, -3) + if not out: + return 0 + phase, devname = out + dev = self.dmesg[phase]['list'][devname] + # get the cpu exec data + tlast = 0 + clast = 0 + cpuexec = dict() + for t in sorted(times): + if tlast == 0 or t <= start or t > end: + tlast = t + continue + list = self.pstl[t] + c = 0 + if name in list: + c = list[name] + if c > maxC: + maxC = c + if c != clast: + key = (tlast, t) + cpuexec[key] = c + tlast = t + clast = c + dev['cpuexec'] = cpuexec + return maxC + def createProcessUsageEvents(self): + # get an array of process names + proclist = [] + for t in sorted(self.pstl): + pslist = self.pstl[t] + for ps in sorted(pslist): + if ps not in proclist: + proclist.append(ps) + # get a list of data points for suspend and resume + tsus = [] + tres = [] + for t in sorted(self.pstl): + if t < self.tSuspended: + tsus.append(t) + else: + tres.append(t) + # process the events for suspend and resume + if len(proclist) > 0: + sysvals.vprint('Process Execution:') + for ps in proclist: + c = self.addProcessUsageEvent(ps, tsus) + if c > 0: + sysvals.vprint('%25s (sus): %d' % (ps, c)) + c = self.addProcessUsageEvent(ps, tres) + if c > 0: + sysvals.vprint('%25s (res): %d' % (ps, c)) + def handleEndMarker(self, time, msg=''): + dm = self.dmesg + self.setEnd(time, msg) + self.initDevicegroups() + # give suspend_prepare an end if needed + if 'suspend_prepare' in dm and dm['suspend_prepare']['end'] < 0: + dm['suspend_prepare']['end'] = time + # assume resume machine ends at next phase start + if 
'resume_machine' in dm and dm['resume_machine']['end'] < 0: + np = self.nextPhase('resume_machine', 1) + if np: + dm['resume_machine']['end'] = dm[np]['start'] + # if kernel resume end not found, assume its the end marker + if self.tKernRes == 0.0: + self.tKernRes = time + # if kernel suspend start not found, assume its the end marker + if self.tKernSus == 0.0: + self.tKernSus = time + # set resume complete to end at end marker + if 'resume_complete' in dm: + dm['resume_complete']['end'] = time + def debugPrint(self): + for p in self.sortedPhases(): + list = self.dmesg[p]['list'] + for devname in sorted(list): + dev = list[devname] + if 'ftrace' in dev: + dev['ftrace'].debugPrint(' [%s]' % devname) + +# Class: DevFunction +# Description: +# A container for kprobe function data we want in the dev timeline +class DevFunction: + def __init__(self, name, args, caller, ret, start, end, u, proc, pid, color): + self.row = 0 + self.count = 1 + self.name = name + self.args = args + self.caller = caller + self.ret = ret + self.time = start + self.length = end - start + self.end = end + self.ubiquitous = u + self.proc = proc + self.pid = pid + self.color = color + def title(self): + cnt = '' + if self.count > 1: + cnt = '(x%d)' % self.count + l = '%0.3fms' % (self.length * 1000) + if self.ubiquitous: + title = '%s(%s)%s <- %s, %s(%s)' % \ + (self.name, self.args, cnt, self.caller, self.ret, l) + else: + title = '%s(%s) %s%s(%s)' % (self.name, self.args, self.ret, cnt, l) + return title.replace('"', '') + def text(self): + if self.count > 1: + text = '%s(x%d)' % (self.name, self.count) + else: + text = self.name + return text + def repeat(self, tgt): + # is the tgt call just a repeat of this call (e.g. are we in a loop) + dt = self.time - tgt.end + # only combine calls if -all- attributes are identical + if tgt.caller == self.caller and \ + tgt.name == self.name and tgt.args == self.args and \ + tgt.proc == self.proc and tgt.pid == self.pid and \ + tgt.ret == self.ret and dt >= 0 and \ + dt <= sysvals.callloopmaxgap and \ + self.length < sysvals.callloopmaxlen: + return True + return False + +# Class: FTraceLine +# Description: +# A container for a single line of ftrace data. 
There are six basic types: +# callgraph line: +# call: " dpm_run_callback() {" +# return: " }" +# leaf: " dpm_run_callback();" +# trace event: +# tracing_mark_write: SUSPEND START or RESUME COMPLETE +# suspend_resume: phase or custom exec block data +# device_pm_callback: device callback info
+class FTraceLine: + def __init__(self, t, m='', d=''): + self.length = 0.0 + self.fcall = False + self.freturn = False + self.fevent = False + self.fkprobe = False + self.depth = 0 + self.name = '' + self.type = '' + self.time = float(t) + if not m and not d: + return
+ # is this a trace event + if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)): + if(d == 'traceevent'): + # nop format trace event + msg = m + else: + # function_graph format trace event + em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m) + msg = em.group('msg') + + emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg) + if(emm): + self.name = emm.group('msg') + self.type = emm.group('call') + else: + self.name = msg + km = re.match('^(?P<n>.*)_cal$', self.type) + if km: + self.fcall = True + self.fkprobe = True + self.type = km.group('n') + return + km = re.match('^(?P<n>.*)_ret$', self.type) + if km: + self.freturn = True + self.fkprobe = True + self.type = km.group('n') + return + self.fevent = True + return
+ # convert the duration to seconds + if(d): + self.length = float(d)/1000000 + # the indentation determines the depth + match = re.match('^(?P<d> *)(?P<o>.*)$', m) + if(not match): + return + self.depth = self.getDepth(match.group('d')) + m = match.group('o')
+ # function return + if(m[0] == '}'): + self.freturn = True + if(len(m) > 1): + # includes comment with function name + match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m) + if(match): + self.name = match.group('n').strip() + # function call + else: + self.fcall = True + # function call with children + if(m[-1] == '{'): + match = re.match('^(?P<n>.*) *\(.*', m) + if(match): + self.name = match.group('n').strip() + # function call with no children (leaf) + elif(m[-1] == ';'): + self.freturn = True + match = re.match('^(?P<n>.*) *\(.*', m) + if(match): + self.name = match.group('n').strip() + # something else (possibly a trace marker) + else: + self.name = m
+ def isCall(self): + return self.fcall and not self.freturn + def isReturn(self): + return self.freturn and not self.fcall + def isLeaf(self): + return self.fcall and self.freturn + def getDepth(self, str): + return len(str)/2
+ def debugPrint(self, info=''): + if self.isLeaf(): + pprint(' -- %12.6f (depth=%02d): %s(); (%.3f us) %s' % (self.time, \ + self.depth, self.name, self.length*1000000, info)) + elif self.freturn: + pprint(' -- %12.6f (depth=%02d): %s} (%.3f us) %s' % (self.time, \ + self.depth, self.name, self.length*1000000, info)) + else: + pprint(' -- %12.6f (depth=%02d): %s() { (%.3f us) %s' % (self.time, \ + self.depth, self.name, self.length*1000000, info))
+ def startMarker(self): + # Is this the starting line of a suspend? + if not self.fevent: + return False + if sysvals.usetracemarkers: + if(self.name.startswith('SUSPEND START')): + return True + return False + else: + if(self.type == 'suspend_resume' and + re.match('suspend_enter\[.*\] begin', self.name)): + return True + return False
+ def endMarker(self): + # Is this the ending line of a resume?
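+ # (with usetracemarkers the test boundaries are literal strings written
+ # to trace_marker: a tracing_mark_write event whose message begins with
+ # "SUSPEND START" opens the test window and "RESUME COMPLETE" closes it;
+ # without markers, the suspend_resume trace events matched below are
+ # used as the boundaries instead)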
+ if not self.fevent: + return False + if sysvals.usetracemarkers: + if(self.name.startswith('RESUME COMPLETE')): + return True + return False + else: + if(self.type == 'suspend_resume' and + re.match('thaw_processes\[.*\] end', self.name)): + return True + return False + +# Class: FTraceCallGraph +# Description: +# A container for the ftrace callgraph of a single recursive function. +# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph +# Each instance is tied to a single device in a single phase, and is +# comprised of an ordered list of FTraceLine objects +class FTraceCallGraph: + vfname = 'missing_function_name' + def __init__(self, pid, sv): + self.id = '' + self.invalid = False + self.name = '' + self.partial = False + self.ignore = False + self.start = -1.0 + self.end = -1.0 + self.list = [] + self.depth = 0 + self.pid = pid + self.sv = sv + def addLine(self, line): + # if this is already invalid, just leave + if(self.invalid): + if(line.depth == 0 and line.freturn): + return 1 + return 0 + # invalidate on bad depth + if(self.depth < 0): + self.invalidate(line) + return 0 + # ignore data til we return to the current depth + if self.ignore: + if line.depth > self.depth: + return 0 + else: + self.list[-1].freturn = True + self.list[-1].length = line.time - self.list[-1].time + self.ignore = False + # if this is a return at self.depth, no more work is needed + if line.depth == self.depth and line.isReturn(): + if line.depth == 0: + self.end = line.time + return 1 + return 0 + # compare current depth with this lines pre-call depth + prelinedep = line.depth + if line.isReturn(): + prelinedep += 1 + last = 0 + lasttime = line.time + if len(self.list) > 0: + last = self.list[-1] + lasttime = last.time + if last.isLeaf(): + lasttime += last.length + # handle low misalignments by inserting returns + mismatch = prelinedep - self.depth + warning = self.sv.verbose and abs(mismatch) > 1 + info = [] + if mismatch < 0: + idx = 0 + # add return calls to get the depth down + while prelinedep < self.depth: + self.depth -= 1 + if idx == 0 and last and last.isCall(): + # special case, turn last call into a leaf + last.depth = self.depth + last.freturn = True + last.length = line.time - last.time + if warning: + info.append(('[make leaf]', last)) + else: + vline = FTraceLine(lasttime) + vline.depth = self.depth + vline.name = self.vfname + vline.freturn = True + self.list.append(vline) + if warning: + if idx == 0: + info.append(('', last)) + info.append(('[add return]', vline)) + idx += 1 + if warning: + info.append(('', line)) + # handle high misalignments by inserting calls + elif mismatch > 0: + idx = 0 + if warning: + info.append(('', last)) + # add calls to get the depth up + while prelinedep > self.depth: + if idx == 0 and line.isReturn(): + # special case, turn this return into a leaf + line.fcall = True + prelinedep -= 1 + if warning: + info.append(('[make leaf]', line)) + else: + vline = FTraceLine(lasttime) + vline.depth = self.depth + vline.name = self.vfname + vline.fcall = True + self.list.append(vline) + self.depth += 1 + if not last: + self.start = vline.time + if warning: + info.append(('[add call]', vline)) + idx += 1 + if warning and ('[make leaf]', line) not in info: + info.append(('', line)) + if warning: + pprint('WARNING: ftrace data missing, corrections made:') + for i in info: + t, obj = i + if obj: + obj.debugPrint(t) + # process the call and set the new depth + skipadd = False + md = self.sv.max_graph_depth + if line.isCall(): + # ignore 
blacklisted/overdepth funcs + if (md and self.depth >= md - 1) or (line.name in self.sv.cgblacklist): + self.ignore = True + else: + self.depth += 1 + elif line.isReturn(): + self.depth -= 1 + # remove blacklisted/overdepth/empty funcs that slipped through + if (last and last.isCall() and last.depth == line.depth) or \ + (md and last and last.depth >= md) or \ + (line.name in self.sv.cgblacklist): + while len(self.list) > 0 and self.list[-1].depth > line.depth: + self.list.pop(-1) + if len(self.list) == 0: + self.invalid = True + return 1 + self.list[-1].freturn = True + self.list[-1].length = line.time - self.list[-1].time + self.list[-1].name = line.name + skipadd = True + if len(self.list) < 1: + self.start = line.time + # check for a mismatch that returned all the way to callgraph end + res = 1 + if mismatch < 0 and self.list[-1].depth == 0 and self.list[-1].freturn: + line = self.list[-1] + skipadd = True + res = -1 + if not skipadd: + self.list.append(line) + if(line.depth == 0 and line.freturn): + if(self.start < 0): + self.start = line.time + self.end = line.time + if line.fcall: + self.end += line.length + if self.list[0].name == self.vfname: + self.invalid = True + if res == -1: + self.partial = True + return res + return 0 + def invalidate(self, line): + if(len(self.list) > 0): + first = self.list[0] + self.list = [] + self.list.append(first) + self.invalid = True + id = 'task %s' % (self.pid) + window = '(%f - %f)' % (self.start, line.time) + if(self.depth < 0): + pprint('Data misalignment for '+id+\ + ' (buffer overflow), ignoring this callback') + else: + pprint('Too much data for '+id+\ + ' '+window+', ignoring this callback') + def slice(self, dev): + minicg = FTraceCallGraph(dev['pid'], self.sv) + minicg.name = self.name + mydepth = -1 + good = False + for l in self.list: + if(l.time < dev['start'] or l.time > dev['end']): + continue + if mydepth < 0: + if l.name == 'mutex_lock' and l.freturn: + mydepth = l.depth + continue + elif l.depth == mydepth and l.name == 'mutex_unlock' and l.fcall: + good = True + break + l.depth -= mydepth + minicg.addLine(l) + if not good or len(minicg.list) < 1: + return 0 + return minicg + def repair(self, enddepth): + # bring the depth back to 0 with additional returns + fixed = False + last = self.list[-1] + for i in reversed(range(enddepth)): + t = FTraceLine(last.time) + t.depth = i + t.freturn = True + fixed = self.addLine(t) + if fixed != 0: + self.end = last.time + return True + return False + def postProcess(self): + if len(self.list) > 0: + self.name = self.list[0].name + stack = dict() + cnt = 0 + last = 0 + for l in self.list: + # ftrace bug: reported duration is not reliable + # check each leaf and clip it at max possible length + if last and last.isLeaf(): + if last.length > l.time - last.time: + last.length = l.time - last.time + if l.isCall(): + stack[l.depth] = l + cnt += 1 + elif l.isReturn(): + if(l.depth not in stack): + if self.sv.verbose: + pprint('Post Process Error: Depth missing') + l.debugPrint() + return False + # calculate call length from call/return lines + cl = stack[l.depth] + cl.length = l.time - cl.time + if cl.name == self.vfname: + cl.name = l.name + stack.pop(l.depth) + l.length = 0 + cnt -= 1 + last = l + if(cnt == 0): + # trace caught the whole call tree + return True + elif(cnt < 0): + if self.sv.verbose: + pprint('Post Process Error: Depth is less than 0') + return False + # trace ended before call tree finished + return self.repair(cnt) + def deviceMatch(self, pid, data): + found = '' + # add the 
callgraph data to the device hierarchy + borderphase = { + 'dpm_prepare': 'suspend_prepare', + 'dpm_complete': 'resume_complete' + } + if(self.name in borderphase): + p = borderphase[self.name] + list = data.dmesg[p]['list'] + for devname in list: + dev = list[devname] + if(pid == dev['pid'] and + self.start <= dev['start'] and + self.end >= dev['end']): + cg = self.slice(dev) + if cg: + dev['ftrace'] = cg + found = devname + return found + for p in data.sortedPhases(): + if(data.dmesg[p]['start'] <= self.start and + self.start <= data.dmesg[p]['end']): + list = data.dmesg[p]['list'] + for devname in sorted(list, key=lambda k:list[k]['start']): + dev = list[devname] + if(pid == dev['pid'] and + self.start <= dev['start'] and + self.end >= dev['end']): + dev['ftrace'] = self + found = devname + break + break + return found + def newActionFromFunction(self, data): + name = self.name + if name in ['dpm_run_callback', 'dpm_prepare', 'dpm_complete']: + return + fs = self.start + fe = self.end + if fs < data.start or fe > data.end: + return + phase = '' + for p in data.sortedPhases(): + if(data.dmesg[p]['start'] <= self.start and + self.start < data.dmesg[p]['end']): + phase = p + break + if not phase: + return + out = data.newActionGlobal(name, fs, fe, -2) + if out: + phase, myname = out + data.dmesg[phase]['list'][myname]['ftrace'] = self + def debugPrint(self, info=''): + pprint('%s pid=%d [%f - %f] %.3f us' % \ + (self.name, self.pid, self.start, self.end, + (self.end - self.start)*1000000)) + for l in self.list: + if l.isLeaf(): + pprint('%f (%02d): %s(); (%.3f us)%s' % (l.time, \ + l.depth, l.name, l.length*1000000, info)) + elif l.freturn: + pprint('%f (%02d): %s} (%.3f us)%s' % (l.time, \ + l.depth, l.name, l.length*1000000, info)) + else: + pprint('%f (%02d): %s() { (%.3f us)%s' % (l.time, \ + l.depth, l.name, l.length*1000000, info)) + pprint(' ') + +class DevItem: + def __init__(self, test, phase, dev): + self.test = test + self.phase = phase + self.dev = dev + def isa(self, cls): + if 'htmlclass' in self.dev and cls in self.dev['htmlclass']: + return True + return False + +# Class: Timeline +# Description: +# A container for a device timeline which calculates +# all the html properties to display it correctly +class Timeline: + html_tblock = '
<div id="block{0}" class="tblock" style="left:{1}%;width:{2}%;"><div class="tback" style="height:{3}px"></div></div>\n'
+ html_device = '<div id="{0}" title="{1}" class="thread{7}" style="left:{2}%;top:{3}px;height:{4}px;width:{5}%;{8}">{6}</div>\n'
+ html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}px;height:{3}px;background:{4}">{5}</div>\n'
+ html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background:{3}"></div>\n'
+ html_legend = '<div id="p{3}" class="square" style="left:{0}%;background:{1}">&nbsp;{2}</div>\n'
+ def __init__(self, rowheight, scaleheight): + self.html = '' + self.height = 0 # total timeline height + self.scaleH = scaleheight # timescale (top) row height + self.rowH = rowheight # device row height + self.bodyH = 0 # body height + self.rows = 0 # total timeline rows + self.rowlines = dict() + self.rowheight = dict()
+ def createHeader(self, sv, stamp): + if(not stamp['time']): + return + self.html += '<div class="version"><a href="https://01.org/pm-graph">%s v%s</a></div>' \ + % (sv.title, sv.version) + if sv.logmsg and sv.testlog: + self.html += '<button id="showtest" class="logbtn btnfmt">test</button>' + if sv.dmesglog: + self.html += '<button id="showdmesg" class="logbtn btnfmt">dmesg</button>' + if sv.ftracelog: + self.html += '<button id="showftrace" class="logbtn btnfmt">ftrace</button>' + headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n' + self.html += headline_stamp.format(stamp['host'], stamp['kernel'], + stamp['mode'], stamp['time']) + if 'man' in stamp and 'plat' in stamp and 'cpu' in stamp and \ + stamp['man'] and stamp['plat'] and stamp['cpu']: + headline_sysinfo = '<div class="stamp sysinfo">{0} {1} <i>with</i> {2}</div>
    \n' + self.html += headline_sysinfo.format(stamp['man'], stamp['plat'], stamp['cpu']) + + # Function: getDeviceRows + # Description: + # determine how may rows the device funcs will take + # Arguments: + # rawlist: the list of devices/actions for a single phase + # Output: + # The total number of rows needed to display this phase of the timeline + def getDeviceRows(self, rawlist): + # clear all rows and set them to undefined + sortdict = dict() + for item in rawlist: + item.row = -1 + sortdict[item] = item.length + sortlist = sorted(sortdict, key=sortdict.get, reverse=True) + remaining = len(sortlist) + rowdata = dict() + row = 1 + # try to pack each row with as many ranges as possible + while(remaining > 0): + if(row not in rowdata): + rowdata[row] = [] + for i in sortlist: + if(i.row >= 0): + continue + s = i.time + e = i.time + i.length + valid = True + for ritem in rowdata[row]: + rs = ritem.time + re = ritem.time + ritem.length + if(not (((s <= rs) and (e <= rs)) or + ((s >= re) and (e >= re)))): + valid = False + break + if(valid): + rowdata[row].append(i) + i.row = row + remaining -= 1 + row += 1 + return row + # Function: getPhaseRows + # Description: + # Organize the timeline entries into the smallest + # number of rows possible, with no entry overlapping + # Arguments: + # devlist: the list of devices/actions in a group of contiguous phases + # Output: + # The total number of rows needed to display this phase of the timeline + def getPhaseRows(self, devlist, row=0, sortby='length'): + # clear all rows and set them to undefined + remaining = len(devlist) + rowdata = dict() + sortdict = dict() + myphases = [] + # initialize all device rows to -1 and calculate devrows + for item in devlist: + dev = item.dev + tp = (item.test, item.phase) + if tp not in myphases: + myphases.append(tp) + dev['row'] = -1 + if sortby == 'start': + # sort by start 1st, then length 2nd + sortdict[item] = (-1*float(dev['start']), float(dev['end']) - float(dev['start'])) + else: + # sort by length 1st, then name 2nd + sortdict[item] = (float(dev['end']) - float(dev['start']), item.dev['name']) + if 'src' in dev: + dev['devrows'] = self.getDeviceRows(dev['src']) + # sort the devlist by length so that large items graph on top + sortlist = sorted(sortdict, key=sortdict.get, reverse=True) + orderedlist = [] + for item in sortlist: + if item.dev['pid'] == -2: + orderedlist.append(item) + for item in sortlist: + if item not in orderedlist: + orderedlist.append(item) + # try to pack each row with as many devices as possible + while(remaining > 0): + rowheight = 1 + if(row not in rowdata): + rowdata[row] = [] + for item in orderedlist: + dev = item.dev + if(dev['row'] < 0): + s = dev['start'] + e = dev['end'] + valid = True + for ritem in rowdata[row]: + rs = ritem.dev['start'] + re = ritem.dev['end'] + if(not (((s <= rs) and (e <= rs)) or + ((s >= re) and (e >= re)))): + valid = False + break + if(valid): + rowdata[row].append(item) + dev['row'] = row + remaining -= 1 + if 'devrows' in dev and dev['devrows'] > rowheight: + rowheight = dev['devrows'] + for t, p in myphases: + if t not in self.rowlines or t not in self.rowheight: + self.rowlines[t] = dict() + self.rowheight[t] = dict() + if p not in self.rowlines[t] or p not in self.rowheight[t]: + self.rowlines[t][p] = dict() + self.rowheight[t][p] = dict() + rh = self.rowH + # section headers should use a different row height + if len(rowdata[row]) == 1 and \ + 'htmlclass' in rowdata[row][0].dev and \ + 'sec' in rowdata[row][0].dev['htmlclass']: + rh = 15 + 
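+ # (packing illustration: given ranges A=[0,3]ms, B=[1,2]ms and C=[4,5]ms,
+ # A is placed first on row 0 as the longest entry, C joins it since
+ # [4,5] does not overlap, and B overlaps A so it opens row 1; each row
+ # is filled greedily with non-overlapping entries before a new row is
+ # added)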
self.rowlines[t][p][row] = rowheight + self.rowheight[t][p][row] = rowheight * rh + row += 1 + if(row > self.rows): + self.rows = int(row) + return row + def phaseRowHeight(self, test, phase, row): + return self.rowheight[test][phase][row] + def phaseRowTop(self, test, phase, row): + top = 0 + for i in sorted(self.rowheight[test][phase]): + if i >= row: + break + top += self.rowheight[test][phase][i] + return top + def calcTotalRows(self): + # Calculate the heights and offsets for the header and rows + maxrows = 0 + standardphases = [] + for t in self.rowlines: + for p in self.rowlines[t]: + total = 0 + for i in sorted(self.rowlines[t][p]): + total += self.rowlines[t][p][i] + if total > maxrows: + maxrows = total + if total == len(self.rowlines[t][p]): + standardphases.append((t, p)) + self.height = self.scaleH + (maxrows*self.rowH) + self.bodyH = self.height - self.scaleH + # if there is 1 line per row, draw them the standard way + for t, p in standardphases: + for i in sorted(self.rowheight[t][p]): + self.rowheight[t][p][i] = float(self.bodyH)/len(self.rowlines[t][p]) + def createZoomBox(self, mode='command', testcount=1): + # Create bounding box, add buttons + html_zoombox = '
<center><button id="zoomin">ZOOM IN +</button><button id="zoomout">ZOOM OUT -</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
+ html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
+ html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail{0}</button>'
+ html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
+ if mode != 'command': + if testcount > 1: + self.html += html_devlist2 + self.html += html_devlist1.format('1') + else: + self.html += html_devlist1.format('') + self.html += html_zoombox + self.html += html_timeline.format('dmesg', self.height)
+ # Function: createTimeScale + # Description: + # Create the timescale for a timeline block + # Arguments: + # m0: start time (mode begin) + # mMax: end time (mode end) + # tTotal: total timeline time + # mode: suspend or resume + # Output: + # The html code needed to display the time scale
+ def createTimeScale(self, m0, mMax, tTotal, mode): + timescale = '<div class="t" style="right:{0}%">{1}</div>\n' + rline = '<div class="t" style="left:0;border-left:1px solid black;border-right:0;">{0}</div>\n' + output = '<div class="timescale">\n' + # set scale for timeline + mTotal = mMax - m0 + tS = 0.1 + if(tTotal <= 0): + return output+'</div>\n' + if(tTotal > 4): + tS = 1 + divTotal = int(mTotal/tS) + 1 + divEdge = (mTotal - tS*(divTotal-1))*100/mTotal + for i in range(divTotal): + htmlline = '' + if(mode == 'suspend'): + pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal) - divEdge) + val = '%0.fms' % (float(i-divTotal+1)*tS*1000) + if(i == divTotal - 1): + val = mode + htmlline = timescale.format(pos, val) + else: + pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal)) + val = '%0.fms' % (float(i)*tS*1000) + htmlline = timescale.format(pos, val) + if(i == 0): + htmlline = rline.format(mode) + output += htmlline + self.html += output+'</div>
    \n' + +# Class: TestProps +# Description: +# A list of values describing the properties of these test runs +class TestProps: + stampfmt = '# [a-z]*-(?P[0-9]{2})(?P[0-9]{2})(?P[0-9]{2})-'+\ + '(?P[0-9]{2})(?P[0-9]{2})(?P[0-9]{2})'+\ + ' (?P.*) (?P.*) (?P.*)$' + wififmt = '^# wifi *(?P\S*) *(?P\S*) *(?P[0-9\.]+).*' + tstatfmt = '^# turbostat (?P\S*)' + testerrfmt = '^# enter_sleep_error (?P.*)' + sysinfofmt = '^# sysinfo .*' + cmdlinefmt = '^# command \| (?P.*)' + kparamsfmt = '^# kparams \| (?P.*)' + devpropfmt = '# Device Properties: .*' + pinfofmt = '# platform-(?P[a-z,A-Z,0-9]*): (?P.*)' + tracertypefmt = '# tracer: (?P.*)' + firmwarefmt = '# fwsuspend (?P[0-9]*) fwresume (?P[0-9]*)$' + procexecfmt = 'ps - (?P.*)$' + ftrace_line_fmt_fg = \ + '^ *(?P
\n' + + # timeline is finished + devtl.html += '</div>\n</div>\n'
+ + # draw a legend which describes the phases by color + if sysvals.suspendmode != 'command': + phasedef = testruns[-1].phasedef + devtl.html += '<div class="legend">\n' + pdelta = 100.0/len(phasedef.keys()) + pmargin = pdelta / 4.0 + for phase in sorted(phasedef, key=lambda k:phasedef[k]['order']): + id, p = '', phasedef[phase] + for word in phase.split('_'): + id += word[0] + order = '%.2f' % ((p['order'] * pdelta) + pmargin) + name = phase.replace('_', ' &nbsp;') + devtl.html += devtl.html_legend.format(order, p['color'], name, id) + devtl.html += '</div>\n'
+ + hf = open(sysvals.htmlfile, 'w') + addCSS(hf, sysvals, len(testruns), kerror) + + # write the device timeline + hf.write(devtl.html) + hf.write('<div id="devicedetailtitle"></div>
    \n') + hf.write('\n') + + # write the ftrace data (callgraph) + if sysvals.cgtest >= 0 and len(testruns) > sysvals.cgtest: + data = testruns[sysvals.cgtest] + else: + data = testruns[-1] + if sysvals.usecallgraph: + addCallgraphs(sysvals, hf, data) + + # add the test log as a hidden div + if sysvals.testlog and sysvals.logmsg: + hf.write('\n') + # add the dmesg log as a hidden div + if sysvals.dmesglog and sysvals.dmesgfile: + hf.write('\n') + # add the ftrace log as a hidden div + if sysvals.ftracelog and sysvals.ftracefile: + hf.write('\n') + + # write the footer and close + addScriptCode(hf, testruns) + hf.write('\n\n') + hf.close() + return True + +def addCSS(hf, sv, testcount=1, kerror=False, extra=''): + kernel = sv.stamp['kernel'] + host = sv.hostname[0].upper()+sv.hostname[1:] + mode = sv.suspendmode + if sv.suspendmode in suspendmodename: + mode = suspendmodename[sv.suspendmode] + title = host+' '+mode+' '+kernel + + # various format changes by flags + cgchk = 'checked' + cgnchk = 'not(:checked)' + if sv.cgexp: + cgchk = 'not(:checked)' + cgnchk = 'checked' + + hoverZ = 'z-index:8;' + if sv.usedevsrc: + hoverZ = '' + + devlistpos = 'absolute' + if testcount > 1: + devlistpos = 'relative' + + scaleTH = 20 + if kerror: + scaleTH = 60 + + # write the html header first (html head, css code, up to body start) + html_header = '\n\n\n\ + \n\ + '+title+'\n\ + \n\n\n' + hf.write(html_header) + +# Function: addScriptCode +# Description: +# Adds the javascript code to the output html +# Arguments: +# hf: the open html file pointer +# testruns: array of Data objects from parseKernelLog or parseTraceLog +def addScriptCode(hf, testruns): + t0 = testruns[0].start * 1000 + tMax = testruns[-1].end * 1000 + # create an array in javascript memory with the device details + detail = ' var devtable = [];\n' + for data in testruns: + topo = data.deviceTopology() + detail += ' devtable[%d] = "%s";\n' % (data.testnumber, topo) + detail += ' var bounds = [%f,%f];\n' % (t0, tMax) + # add the code which will manipulate the data in the browser + script_code = \ + '\n' + hf.write(script_code); + +# Function: executeSuspend +# Description: +# Execute system suspend through the sysfs interface, then copy the output +# dmesg and ftrace files to the test output directory. 
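+ # Illustrative sketch, not part of the patch: the essential sysfs write
+ # that executeSuspend() performs below, stripped of the ftrace, rtcwake,
+ # delay and logging machinery it wraps around it. Assumes root privileges
+ # and a mode name listed in /sys/power/state (e.g. 'mem' or 'freeze'):
+ #
+ #     def simple_sysfs_suspend(mode='mem'):
+ #         with open('/sys/power/state', 'w') as pf:
+ #             pf.write(mode)  # blocks here until the system resumes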
+def executeSuspend(quiet=False): + sv, tp, pm = sysvals, sysvals.tpath, ProcessMonitor() + if sv.wifi: + wifi = sv.checkWifi() + sv.dlog('wifi check, connected device is "%s"' % wifi) + testdata = [] + # run these commands to prepare the system for suspend + if sv.display: + if not quiet: + pprint('SET DISPLAY TO %s' % sv.display.upper()) + ret = sv.displayControl(sv.display) + sv.dlog('xset display %s, ret = %d' % (sv.display, ret)) + time.sleep(1) + if sv.sync: + if not quiet: + pprint('SYNCING FILESYSTEMS') + sv.dlog('syncing filesystems') + call('sync', shell=True) + sv.dlog('read dmesg') + sv.initdmesg() + # start ftrace + if(sv.usecallgraph or sv.usetraceevents): + if not quiet: + pprint('START TRACING') + sv.dlog('start ftrace tracing') + sv.fsetVal('1', 'tracing_on') + if sv.useprocmon: + sv.dlog('start the process monitor') + pm.start() + sv.dlog('run the cmdinfo list before') + sv.cmdinfo(True) + # execute however many s/r runs requested + for count in range(1,sv.execcount+1): + # x2delay in between test runs + if(count > 1 and sv.x2delay > 0): + sv.fsetVal('WAIT %d' % sv.x2delay, 'trace_marker') + time.sleep(sv.x2delay/1000.0) + sv.fsetVal('WAIT END', 'trace_marker') + # start message + if sv.testcommand != '': + pprint('COMMAND START') + else: + if(sv.rtcwake): + pprint('SUSPEND START') + else: + pprint('SUSPEND START (press a key to resume)') + # set rtcwake + if(sv.rtcwake): + if not quiet: + pprint('will issue an rtcwake in %d seconds' % sv.rtcwaketime) + sv.dlog('enable RTC wake alarm') + sv.rtcWakeAlarmOn() + # start of suspend trace marker + if(sv.usecallgraph or sv.usetraceevents): + sv.fsetVal(datetime.now().strftime(sv.tmstart), 'trace_marker') + # predelay delay + if(count == 1 and sv.predelay > 0): + sv.fsetVal('WAIT %d' % sv.predelay, 'trace_marker') + time.sleep(sv.predelay/1000.0) + sv.fsetVal('WAIT END', 'trace_marker') + # initiate suspend or command + sv.dlog('system executing a suspend') + tdata = {'error': ''} + if sv.testcommand != '': + res = call(sv.testcommand+' 2>&1', shell=True); + if res != 0: + tdata['error'] = 'cmd returned %d' % res + else: + mode = sv.suspendmode + if sv.memmode and os.path.exists(sv.mempowerfile): + mode = 'mem' + sv.testVal(sv.mempowerfile, 'radio', sv.memmode) + if sv.diskmode and os.path.exists(sv.diskpowerfile): + mode = 'disk' + sv.testVal(sv.diskpowerfile, 'radio', sv.diskmode) + if sv.acpidebug: + sv.testVal(sv.acpipath, 'acpi', '0xe') + if mode == 'freeze' and sv.haveTurbostat(): + # execution will pause here + turbo = sv.turbostat() + if turbo: + tdata['turbo'] = turbo + else: + pf = open(sv.powerfile, 'w') + pf.write(mode) + # execution will pause here + try: + pf.close() + except Exception as e: + tdata['error'] = str(e) + sv.dlog('system returned from resume') + # reset everything + sv.testVal('restoreall') + if(sv.rtcwake): + sv.dlog('disable RTC wake alarm') + sv.rtcWakeAlarmOff() + # postdelay delay + if(count == sv.execcount and sv.postdelay > 0): + sv.fsetVal('WAIT %d' % sv.postdelay, 'trace_marker') + time.sleep(sv.postdelay/1000.0) + sv.fsetVal('WAIT END', 'trace_marker') + # return from suspend + pprint('RESUME COMPLETE') + if(sv.usecallgraph or sv.usetraceevents): + sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker') + if sv.wifi and wifi: + tdata['wifi'] = sv.pollWifi(wifi) + sv.dlog('wifi check, %s' % tdata['wifi']) + if(sv.suspendmode == 'mem' or sv.suspendmode == 'command'): + sv.dlog('read the ACPI FPDT') + tdata['fw'] = getFPDT(False) + testdata.append(tdata) + sv.dlog('run the cmdinfo list 
after') + cmdafter = sv.cmdinfo(False) + # stop ftrace + if(sv.usecallgraph or sv.usetraceevents): + if sv.useprocmon: + sv.dlog('stop the process monitor') + pm.stop() + sv.fsetVal('0', 'tracing_on') + # grab a copy of the dmesg output + if not quiet: + pprint('CAPTURING DMESG') + sysvals.dlog('EXECUTION TRACE END') + sv.getdmesg(testdata) + # grab a copy of the ftrace output + if(sv.usecallgraph or sv.usetraceevents): + if not quiet: + pprint('CAPTURING TRACE') + op = sv.writeDatafileHeader(sv.ftracefile, testdata) + fp = open(tp+'trace', 'r') + for line in fp: + op.write(line) + op.close() + sv.fsetVal('', 'trace') + sv.platforminfo(cmdafter) + +def readFile(file): + if os.path.islink(file): + return os.readlink(file).split('/')[-1] + else: + return sysvals.getVal(file).strip() + +# Function: ms2nice +# Description: +# Print out a very concise time string in minutes and seconds +# Output: +# The time string, e.g. "1901m16s" +def ms2nice(val): + val = int(val) + h = val // 3600000 + m = (val // 60000) % 60 + s = (val // 1000) % 60 + if h > 0: + return '%d:%02d:%02d' % (h, m, s) + if m > 0: + return '%02d:%02d' % (m, s) + return '%ds' % s + +def yesno(val): + list = {'enabled':'A', 'disabled':'S', 'auto':'E', 'on':'D', + 'active':'A', 'suspended':'S', 'suspending':'S'} + if val not in list: + return ' ' + return list[val] + +# Function: deviceInfo +# Description: +# Detect all the USB hosts and devices currently connected and add +# a list of USB device names to sysvals for better timeline readability +def deviceInfo(output=''): + if not output: + pprint('LEGEND\n'\ + '---------------------------------------------------------------------------------------------\n'\ + ' A = async/sync PM queue (A/S) C = runtime active children\n'\ + ' R = runtime suspend enabled/disabled (E/D) rACTIVE = runtime active (min/sec)\n'\ + ' S = runtime status active/suspended (A/S) rSUSPEND = runtime suspend (min/sec)\n'\ + ' U = runtime usage count\n'\ + '---------------------------------------------------------------------------------------------\n'\ + 'DEVICE NAME A R S U C rACTIVE rSUSPEND\n'\ + '---------------------------------------------------------------------------------------------') + + res = [] + tgtval = 'runtime_status' + lines = dict() + for dirname, dirnames, filenames in os.walk('/sys/devices'): + if(not re.match('.*/power', dirname) or + 'control' not in filenames or + tgtval not in filenames): + continue + name = '' + dirname = dirname[:-6] + device = dirname.split('/')[-1] + power = dict() + power[tgtval] = readFile('%s/power/%s' % (dirname, tgtval)) + # only list devices which support runtime suspend + if power[tgtval] not in ['active', 'suspended', 'suspending']: + continue + for i in ['product', 'driver', 'subsystem']: + file = '%s/%s' % (dirname, i) + if os.path.exists(file): + name = readFile(file) + break + for i in ['async', 'control', 'runtime_status', 'runtime_usage', + 'runtime_active_kids', 'runtime_active_time', + 'runtime_suspended_time']: + if i in filenames: + power[i] = readFile('%s/power/%s' % (dirname, i)) + if output: + if power['control'] == output: + res.append('%s/power/control' % dirname) + continue + lines[dirname] = '%-26s %-26s %1s %1s %1s %1s %1s %10s %10s' % \ + (device[:26], name[:26], + yesno(power['async']), \ + yesno(power['control']), \ + yesno(power['runtime_status']), \ + power['runtime_usage'], \ + power['runtime_active_kids'], \ + ms2nice(power['runtime_active_time']), \ + ms2nice(power['runtime_suspended_time'])) + for i in sorted(lines): + 
print(lines[i]) + return res + +# Function: getModes +# Description: +# Determine the supported power modes on this system +# Output: +# A string list of the available modes +def getModes(): + modes = [] + if(os.path.exists(sysvals.powerfile)): + fp = open(sysvals.powerfile, 'r') + modes = fp.read().split() + fp.close() + if(os.path.exists(sysvals.mempowerfile)): + deep = False + fp = open(sysvals.mempowerfile, 'r') + for m in fp.read().split(): + memmode = m.strip('[]') + if memmode == 'deep': + deep = True + else: + modes.append('mem-%s' % memmode) + fp.close() + if 'mem' in modes and not deep: + modes.remove('mem') + if('disk' in modes and os.path.exists(sysvals.diskpowerfile)): + fp = open(sysvals.diskpowerfile, 'r') + for m in fp.read().split(): + modes.append('disk-%s' % m.strip('[]')) + fp.close() + return modes + +# Function: dmidecode +# Description: +# Read the bios tables and pull out system info +# Arguments: +# mempath: /dev/mem or custom mem path +# fatal: True to exit on error, False to return empty dict +# Output: +# A dict object with all available key/values +def dmidecode(mempath, fatal=False): + out = dict() + + # the list of values to retrieve, with hardcoded (type, idx) + info = { + 'bios-vendor': (0, 4), + 'bios-version': (0, 5), + 'bios-release-date': (0, 8), + 'system-manufacturer': (1, 4), + 'system-product-name': (1, 5), + 'system-version': (1, 6), + 'system-serial-number': (1, 7), + 'baseboard-manufacturer': (2, 4), + 'baseboard-product-name': (2, 5), + 'baseboard-version': (2, 6), + 'baseboard-serial-number': (2, 7), + 'chassis-manufacturer': (3, 4), + 'chassis-type': (3, 5), + 'chassis-version': (3, 6), + 'chassis-serial-number': (3, 7), + 'processor-manufacturer': (4, 7), + 'processor-version': (4, 16), + } + if(not os.path.exists(mempath)): + if(fatal): + doError('file does not exist: %s' % mempath) + return out + if(not os.access(mempath, os.R_OK)): + if(fatal): + doError('file is not readable: %s' % mempath) + return out + + # by default use legacy scan, but try to use EFI first + memaddr = 0xf0000 + memsize = 0x10000 + for ep in ['/sys/firmware/efi/systab', '/proc/efi/systab']: + if not os.path.exists(ep) or not os.access(ep, os.R_OK): + continue + fp = open(ep, 'r') + buf = fp.read() + fp.close() + i = buf.find('SMBIOS=') + if i >= 0: + try: + memaddr = int(buf[i+7:], 16) + memsize = 0x20 + except: + continue + + # read in the memory for scanning + try: + fp = open(mempath, 'rb') + fp.seek(memaddr) + buf = fp.read(memsize) + except: + if(fatal): + doError('DMI table is unreachable, sorry') + else: + pprint('WARNING: /dev/mem is not readable, ignoring DMI data') + return out + fp.close() + + # search for either an SM table or DMI table + i = base = length = num = 0 + while(i < memsize): + if buf[i:i+4] == b'_SM_' and i < memsize - 16: + length = struct.unpack('H', buf[i+22:i+24])[0] + base, num = struct.unpack('IH', buf[i+24:i+30]) + break + elif buf[i:i+5] == b'_DMI_': + length = struct.unpack('H', buf[i+6:i+8])[0] + base, num = struct.unpack('IH', buf[i+8:i+14]) + break + i += 16 + if base == 0 and length == 0 and num == 0: + if(fatal): + doError('Neither SMBIOS nor DMI were found') + else: + return out + + # read in the SM or DMI table + try: + fp = open(mempath, 'rb') + fp.seek(base) + buf = fp.read(length) + except: + if(fatal): + doError('DMI table is unreachable, sorry') + else: + pprint('WARNING: /dev/mem is not readable, ignoring DMI data') + return out + fp.close() + + # scan the table for the values we want + count = i = 0 + while(count < num 
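+ # (scan details: the legacy path below walks the 0xf0000-0xfffff BIOS
+ # region in 16-byte steps looking for an SMBIOS '_SM_' or legacy '_DMI_'
+ # anchor; the unpacked fields yield the DMI table's length, base address
+ # and record count, which the second read then parses)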
and i <= len(buf) - 4): + type, size, handle = struct.unpack('BBH', buf[i:i+4]) + n = i + size + while n < len(buf) - 1: + if 0 == struct.unpack('H', buf[n:n+2])[0]: + break + n += 1 + data = buf[i+size:n+2].split(b'\0') + for name in info: + itype, idxadr = info[name] + if itype == type: + idx = struct.unpack('B', buf[i+idxadr:i+idxadr+1])[0] + if idx > 0 and idx < len(data) - 1: + s = data[idx-1].decode('utf-8') + if s.strip() and s.strip().lower() != 'to be filled by o.e.m.': + out[name] = s + i = n + 2 + count += 1 + return out + +# Function: getFPDT +# Description: +# Read the acpi bios tables and pull out FPDT, the firmware data +# Arguments: +# output: True to output the info to stdout, False otherwise +def getFPDT(output): + rectype = {} + rectype[0] = 'Firmware Basic Boot Performance Record' + rectype[1] = 'S3 Performance Table Record' + prectype = {} + prectype[0] = 'Basic S3 Resume Performance Record' + prectype[1] = 'Basic S3 Suspend Performance Record' + + sysvals.rootCheck(True) + if(not os.path.exists(sysvals.fpdtpath)): + if(output): + doError('file does not exist: %s' % sysvals.fpdtpath) + return False + if(not os.access(sysvals.fpdtpath, os.R_OK)): + if(output): + doError('file is not readable: %s' % sysvals.fpdtpath) + return False + if(not os.path.exists(sysvals.mempath)): + if(output): + doError('file does not exist: %s' % sysvals.mempath) + return False + if(not os.access(sysvals.mempath, os.R_OK)): + if(output): + doError('file is not readable: %s' % sysvals.mempath) + return False + + fp = open(sysvals.fpdtpath, 'rb') + buf = fp.read() + fp.close() + + if(len(buf) < 36): + if(output): + doError('Invalid FPDT table data, should '+\ + 'be at least 36 bytes') + return False + + table = struct.unpack('4sIBB6s8sI4sI', buf[0:36]) + if(output): + pprint('\n'\ + 'Firmware Performance Data Table (%s)\n'\ + ' Signature : %s\n'\ + ' Table Length : %u\n'\ + ' Revision : %u\n'\ + ' Checksum : 0x%x\n'\ + ' OEM ID : %s\n'\ + ' OEM Table ID : %s\n'\ + ' OEM Revision : %u\n'\ + ' Creator ID : %s\n'\ + ' Creator Revision : 0x%x\n'\ + '' % (ascii(table[0]), ascii(table[0]), table[1], table[2], + table[3], ascii(table[4]), ascii(table[5]), table[6], + ascii(table[7]), table[8])) + + if(table[0] != b'FPDT'): + if(output): + doError('Invalid FPDT table') + return False + if(len(buf) <= 36): + return False + i = 0 + fwData = [0, 0] + records = buf[36:] + try: + fp = open(sysvals.mempath, 'rb') + except: + pprint('WARNING: /dev/mem is not readable, ignoring the FPDT data') + return False + while(i < len(records)): + header = struct.unpack('HBB', records[i:i+4]) + if(header[0] not in rectype): + i += header[1] + continue + if(header[1] != 16): + i += header[1] + continue + addr = struct.unpack('Q', records[i+8:i+16])[0] + try: + fp.seek(addr) + first = fp.read(8) + except: + if(output): + pprint('Bad address 0x%x in %s' % (addr, sysvals.mempath)) + return [0, 0] + rechead = struct.unpack('4sI', first) + recdata = fp.read(rechead[1]-8) + if(rechead[0] == b'FBPT'): + record = struct.unpack('HBBIQQQQQ', recdata[:48]) + if(output): + pprint('%s (%s)\n'\ + ' Reset END : %u ns\n'\ + ' OS Loader LoadImage Start : %u ns\n'\ + ' OS Loader StartImage Start : %u ns\n'\ + ' ExitBootServices Entry : %u ns\n'\ + ' ExitBootServices Exit : %u ns'\ + '' % (rectype[header[0]], ascii(rechead[0]), record[4], record[5], + record[6], record[7], record[8])) + elif(rechead[0] == b'S3PT'): + if(output): + pprint('%s (%s)' % (rectype[header[0]], ascii(rechead[0]))) + j = 0 + while(j < len(recdata)): + prechead = 
struct.unpack('HBB', recdata[j:j+4]) + if(prechead[0] not in prectype): + continue + if(prechead[0] == 0): + record = struct.unpack('IIQQ', recdata[j:j+prechead[1]]) + fwData[1] = record[2] + if(output): + pprint(' %s\n'\ + ' Resume Count : %u\n'\ + ' FullResume : %u ns\n'\ + ' AverageResume : %u ns'\ + '' % (prectype[prechead[0]], record[1], + record[2], record[3])) + elif(prechead[0] == 1): + record = struct.unpack('QQ', recdata[j+4:j+prechead[1]]) + fwData[0] = record[1] - record[0] + if(output): + pprint(' %s\n'\ + ' SuspendStart : %u ns\n'\ + ' SuspendEnd : %u ns\n'\ + ' SuspendTime : %u ns'\ + '' % (prectype[prechead[0]], record[0], + record[1], fwData[0])) + + j += prechead[1] + if(output): + pprint('') + i += header[1] + fp.close() + return fwData + +# Function: statusCheck +# Description: +# Verify that the requested command and options will work, and +# print the results to the terminal +# Output: +# True if the test will work, False if not +def statusCheck(probecheck=False): + status = '' + + pprint('Checking this system (%s)...' % platform.node()) + + # check we have root access + res = sysvals.colorText('NO (No features of this tool will work!)') + if(sysvals.rootCheck(False)): + res = 'YES' + pprint(' have root access: %s' % res) + if(res != 'YES'): + pprint(' Try running this script with sudo') + return 'missing root access' + + # check sysfs is mounted + res = sysvals.colorText('NO (No features of this tool will work!)') + if(os.path.exists(sysvals.powerfile)): + res = 'YES' + pprint(' is sysfs mounted: %s' % res) + if(res != 'YES'): + return 'sysfs is missing' + + # check target mode is a valid mode + if sysvals.suspendmode != 'command': + res = sysvals.colorText('NO') + modes = getModes() + if(sysvals.suspendmode in modes): + res = 'YES' + else: + status = '%s mode is not supported' % sysvals.suspendmode + pprint(' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res)) + if(res == 'NO'): + pprint(' valid power modes are: %s' % modes) + pprint(' please choose one with -m') + + # check if ftrace is available + res = sysvals.colorText('NO') + ftgood = sysvals.verifyFtrace() + if(ftgood): + res = 'YES' + elif(sysvals.usecallgraph): + status = 'ftrace is not properly supported' + pprint(' is ftrace supported: %s' % res) + + # check if kprobes are available + if sysvals.usekprobes: + res = sysvals.colorText('NO') + sysvals.usekprobes = sysvals.verifyKprobes() + if(sysvals.usekprobes): + res = 'YES' + else: + sysvals.usedevsrc = False + pprint(' are kprobes supported: %s' % res) + + # what data source are we using + res = 'DMESG' + if(ftgood): + sysvals.usetraceevents = True + for e in sysvals.traceevents: + if not os.path.exists(sysvals.epath+e): + sysvals.usetraceevents = False + if(sysvals.usetraceevents): + res = 'FTRACE (all trace events found)' + pprint(' timeline data source: %s' % res) + + # check if rtcwake + res = sysvals.colorText('NO') + if(sysvals.rtcpath != ''): + res = 'YES' + elif(sysvals.rtcwake): + status = 'rtcwake is not properly supported' + pprint(' is rtcwake supported: %s' % res) + + # check info commands + pprint(' optional commands this tool may use for info:') + no = sysvals.colorText('MISSING') + yes = sysvals.colorText('FOUND', 32) + for c in ['turbostat', 'mcelog', 'lspci', 'lsusb']: + if c == 'turbostat': + res = yes if sysvals.haveTurbostat() else no + else: + res = yes if sysvals.getExec(c) else no + pprint(' %s: %s' % (c, res)) + + if not probecheck: + return status + + # verify kprobes + if sysvals.usekprobes: + for name in 
sysvals.tracefuncs: + sysvals.defaultKprobe(name, sysvals.tracefuncs[name]) + if sysvals.usedevsrc: + for name in sysvals.dev_tracefuncs: + sysvals.defaultKprobe(name, sysvals.dev_tracefuncs[name]) + sysvals.addKprobes(True) + + return status + +# Function: doError +# Description: +# generic error function for catastrphic failures +# Arguments: +# msg: the error message to print +# help: True if printHelp should be called after, False otherwise +def doError(msg, help=False): + if(help == True): + printHelp() + pprint('ERROR: %s\n' % msg) + sysvals.outputResult({'error':msg}) + sys.exit(1) + +# Function: getArgInt +# Description: +# pull out an integer argument from the command line with checks +def getArgInt(name, args, min, max, main=True): + if main: + try: + arg = next(args) + except: + doError(name+': no argument supplied', True) + else: + arg = args + try: + val = int(arg) + except: + doError(name+': non-integer value given', True) + if(val < min or val > max): + doError(name+': value should be between %d and %d' % (min, max), True) + return val + +# Function: getArgFloat +# Description: +# pull out a float argument from the command line with checks +def getArgFloat(name, args, min, max, main=True): + if main: + try: + arg = next(args) + except: + doError(name+': no argument supplied', True) + else: + arg = args + try: + val = float(arg) + except: + doError(name+': non-numerical value given', True) + if(val < min or val > max): + doError(name+': value should be between %f and %f' % (min, max), True) + return val + +def processData(live=False, quiet=False): + if not quiet: + pprint('PROCESSING: %s' % sysvals.htmlfile) + sysvals.vprint('usetraceevents=%s, usetracemarkers=%s, usekprobes=%s' % \ + (sysvals.usetraceevents, sysvals.usetracemarkers, sysvals.usekprobes)) + error = '' + if(sysvals.usetraceevents): + testruns, error = parseTraceLog(live) + if sysvals.dmesgfile: + for data in testruns: + data.extractErrorInfo() + else: + testruns = loadKernelLog() + for data in testruns: + parseKernelLog(data) + if(sysvals.ftracefile and (sysvals.usecallgraph or sysvals.usetraceevents)): + appendIncompleteTraceLog(testruns) + if not sysvals.stamp: + pprint('ERROR: data does not include the expected stamp') + return (testruns, {'error': 'timeline generation failed'}) + shown = ['bios', 'biosdate', 'cpu', 'host', 'kernel', 'man', 'memfr', + 'memsz', 'mode', 'numcpu', 'plat', 'time', 'wifi'] + sysvals.vprint('System Info:') + for key in sorted(sysvals.stamp): + if key in shown: + sysvals.vprint(' %-8s : %s' % (key.upper(), sysvals.stamp[key])) + sysvals.vprint('Command:\n %s' % sysvals.cmdline) + for data in testruns: + if data.turbostat: + idx, s = 0, 'Turbostat:\n ' + for val in data.turbostat.split('|'): + idx += len(val) + 1 + if idx >= 80: + idx = 0 + s += '\n ' + s += val + ' ' + sysvals.vprint(s) + data.printDetails() + if len(sysvals.platinfo) > 0: + sysvals.vprint('\nPlatform Info:') + for info in sysvals.platinfo: + sysvals.vprint('[%s - %s]' % (info[0], info[1])) + sysvals.vprint(info[2]) + sysvals.vprint('') + if sysvals.cgdump: + for data in testruns: + data.debugPrint() + sys.exit(0) + if len(testruns) < 1: + pprint('ERROR: Not enough test data to build a timeline') + return (testruns, {'error': 'timeline generation failed'}) + sysvals.vprint('Creating the html timeline (%s)...' 
% sysvals.htmlfile) + createHTML(testruns, error) + if not quiet: + pprint('DONE: %s' % sysvals.htmlfile) + data = testruns[0] + stamp = data.stamp + stamp['suspend'], stamp['resume'] = data.getTimeValues() + if data.fwValid: + stamp['fwsuspend'], stamp['fwresume'] = data.fwSuspend, data.fwResume + if error: + stamp['error'] = error + return (testruns, stamp) + +# Function: rerunTest +# Description: +# generate an output from an existing set of ftrace/dmesg logs +def rerunTest(htmlfile=''): + if sysvals.ftracefile: + doesTraceLogHaveTraceEvents() + if not sysvals.dmesgfile and not sysvals.usetraceevents: + doError('recreating this html output requires a dmesg file') + if htmlfile: + sysvals.htmlfile = htmlfile + else: + sysvals.setOutputFile() + if os.path.exists(sysvals.htmlfile): + if not os.path.isfile(sysvals.htmlfile): + doError('a directory already exists with this name: %s' % sysvals.htmlfile) + elif not os.access(sysvals.htmlfile, os.W_OK): + doError('missing permission to write to %s' % sysvals.htmlfile) + testruns, stamp = processData() + sysvals.resetlog() + return stamp + +# Function: runTest +# Description: +# execute a suspend/resume, gather the logs, and generate the output +def runTest(n=0, quiet=False): + # prepare for the test + sysvals.initTestOutput('suspend') + op = sysvals.writeDatafileHeader(sysvals.dmesgfile, []) + op.write('# EXECUTION TRACE START\n') + op.close() + if n <= 1: + if sysvals.rs != 0: + sysvals.dlog('%sabling runtime suspend' % ('en' if sysvals.rs > 0 else 'dis')) + sysvals.setRuntimeSuspend(True) + if sysvals.display: + ret = sysvals.displayControl('init') + sysvals.dlog('xset display init, ret = %d' % ret) + sysvals.dlog('initialize ftrace') + sysvals.initFtrace(quiet) + + # execute the test + executeSuspend(quiet) + sysvals.cleanupFtrace() + if sysvals.skiphtml: + sysvals.outputResult({}, n) + sysvals.sudoUserchown(sysvals.testdir) + return + testruns, stamp = processData(True, quiet) + for data in testruns: + del data + sysvals.sudoUserchown(sysvals.testdir) + sysvals.outputResult(stamp, n) + if 'error' in stamp: + return 2 + return 0 + +def find_in_html(html, start, end, firstonly=True): + cnt, out, list = len(html), [], [] + if firstonly: + m = re.search(start, html) + if m: + list.append(m) + else: + list = re.finditer(start, html) + for match in list: + s = match.end() + e = cnt if (len(out) < 1 or s + 10000 > cnt) else s + 10000 + m = re.search(end, html[s:e]) + if not m: + break + e = s + m.start() + str = html[s:e] + if end == 'ms': + num = re.search(r'[-+]?\d*\.\d+|\d+', str) + str = num.group() if num else 'NaN' + if firstonly: + return str + out.append(str) + if firstonly: + return '' + return out + +def data_from_html(file, outpath, issues, fulldetail=False): + html = open(file, 'r').read() + sysvals.htmlfile = os.path.relpath(file, outpath) + # extract general info + suspend = find_in_html(html, 'Kernel Suspend', 'ms') + resume = find_in_html(html, 'Kernel Resume', 'ms') + sysinfo = find_in_html(html, '
<div class="stamp sysinfo">', '</div>') + line = find_in_html(html, '<div class="stamp">', '</div>') + stmp = line.split() + if not suspend or not resume or len(stmp) != 8: + return False + try: + dt = datetime.strptime(' '.join(stmp[3:]), '%B %d %Y, %I:%M:%S %p') + except: + return False + sysvals.hostname = stmp[0] + tstr = dt.strftime('%Y/%m/%d %H:%M:%S')
+ error = find_in_html(html, '<table class="testfail"><tr><td>', '</td>') + if error: + m = re.match('[a-z0-9]* failed in (?P<p>\S*).*', error) + if m: + result = 'fail in %s' % m.group('p') + else: + result = 'fail' + else: + result = 'pass'
+ # extract error info + tp, ilist = False, [] + extra = dict() + log = find_in_html(html, '<div id="dmesglog" style="display:none;">', '</div>
').strip() + if log: + d = Data(0) + d.end = 999999999 + d.dmesgtext = log.split('\n') + tp = d.extractErrorInfo() + for msg in tp.msglist: + sysvals.errorSummary(issues, msg) + if stmp[2] == 'freeze': + extra = d.turbostatInfo() + elist = dict() + for dir in d.errorinfo: + for err in d.errorinfo[dir]: + if err[0] not in elist: + elist[err[0]] = 0 + elist[err[0]] += 1 + for i in elist: + ilist.append('%sx%d' % (i, elist[i]) if elist[i] > 1 else i)
+ wifi = find_in_html(html, 'Wifi Resume: ', '</td>') + if wifi: + extra['wifi'] = wifi + low = find_in_html(html, 'freeze time: <b>', ' ms</b>') + for lowstr in ['waking', '+']: + if not low: + break + if lowstr not in low: + continue + if lowstr == '+': + issue = 'S2LOOPx%d' % len(low.split('+')) + else: + m = re.match('.*waking *(?P<n>[0-9]*) *times.*', low) + issue = 'S2WAKEx%s' % m.group('n') if m else 'S2WAKExNaN' + match = [i for i in issues if i['match'] == issue] + if len(match) > 0: + match[0]['count'] += 1 + if sysvals.hostname not in match[0]['urls']: + match[0]['urls'][sysvals.hostname] = [sysvals.htmlfile] + elif sysvals.htmlfile not in match[0]['urls'][sysvals.hostname]: + match[0]['urls'][sysvals.hostname].append(sysvals.htmlfile) + else: + issues.append({ + 'match': issue, 'count': 1, 'line': issue, + 'urls': {sysvals.hostname: [sysvals.htmlfile]}, + }) + ilist.append(issue)
+ # extract device info + devices = dict() + for line in html.split('\n'): + m = re.match(' *<div id=\"[a,0-9]*\" title=\"(?P<title>.*)\" class=\"thread.*', line) + if not m or 'thread kth' in line or 'thread sec' in line: + continue + m = re.match('(?P<n>.*) \((?P<t>[0-9,\.]*) ms\) (?P<p>.*)', m.group('title')) + if not m: + continue + name, time, phase = m.group('n'), m.group('t'), m.group('p') + if ' async' in name or ' sync' in name: + name = ' '.join(name.split(' ')[:-1]) + if phase.startswith('suspend'): + d = 'suspend' + elif phase.startswith('resume'): + d = 'resume' + else: + continue + if d not in devices: + devices[d] = dict() + if name not in devices[d]: + devices[d][name] = 0.0 + devices[d][name] += float(time)
+ # create worst device info + worst = dict() + for d in ['suspend', 'resume']: + worst[d] = {'name':'', 'time': 0.0} + dev = devices[d] if d in devices else 0 + if dev and len(dev.keys()) > 0: + n = sorted(dev, key=lambda k:(dev[k], k), reverse=True)[0] + worst[d]['name'], worst[d]['time'] = n, dev[n]
+ data = { + 'mode': stmp[2], + 'host': stmp[0], + 'kernel': stmp[1], + 'sysinfo': sysinfo, + 'time': tstr, + 'result': result, + 'issues': ' '.join(ilist), + 'suspend': suspend, + 'resume': resume, + 'devlist': devices, + 'sus_worst': worst['suspend']['name'], + 'sus_worsttime': worst['suspend']['time'], + 'res_worst': worst['resume']['name'], + 'res_worsttime': worst['resume']['time'], + 'url': sysvals.htmlfile, + } + for key in extra: + data[key] = extra[key] + if fulldetail: + data['funclist'] = find_in_html(html, '<div title="', '" class="traceevent"', False)

    (%s: %s), use "true/false" or "1/0"' % (name, value), True) + return False + +# Function: configFromFile +# Description: +# Configure the script via the info in a config file +def configFromFile(file): + Config = configparser.ConfigParser() + + Config.read(file) + sections = Config.sections() + overridekprobes = False + overridedevkprobes = False + if 'Settings' in sections: + for opt in Config.options('Settings'): + value = Config.get('Settings', opt).lower() + option = opt.lower() + if(option == 'verbose'): + sysvals.verbose = checkArgBool(option, value) + elif(option == 'addlogs'): + sysvals.dmesglog = sysvals.ftracelog = checkArgBool(option, value) + elif(option == 'dev'): + sysvals.usedevsrc = checkArgBool(option, value) + elif(option == 'proc'): + sysvals.useprocmon = checkArgBool(option, value) + elif(option == 'x2'): + if checkArgBool(option, value): + sysvals.execcount = 2 + elif(option == 'callgraph'): + sysvals.usecallgraph = checkArgBool(option, value) + elif(option == 'override-timeline-functions'): + overridekprobes = checkArgBool(option, value) + elif(option == 'override-dev-timeline-functions'): + overridedevkprobes = checkArgBool(option, value) + elif(option == 'skiphtml'): + sysvals.skiphtml = checkArgBool(option, value) + elif(option == 'sync'): + sysvals.sync = checkArgBool(option, value) + elif(option == 'rs' or option == 'runtimesuspend'): + if value in switchvalues: + if value in switchoff: + sysvals.rs = -1 + else: + sysvals.rs = 1 + else: + doError('invalid value --> (%s: %s), use "enable/disable"' % (option, value), True) + elif(option == 'display'): + disopt = ['on', 'off', 'standby', 'suspend'] + if value not in disopt: + doError('invalid value --> (%s: %s), use %s' % (option, value, disopt), True) + sysvals.display = value + elif(option == 'gzip'): + sysvals.gzip = checkArgBool(option, value) + elif(option == 'cgfilter'): + sysvals.setCallgraphFilter(value) + elif(option == 'cgskip'): + if value in switchoff: + sysvals.cgskip = '' + else: + sysvals.cgskip = sysvals.configFile(val) + if(not sysvals.cgskip): + doError('%s does not exist' % sysvals.cgskip) + elif(option == 'cgtest'): + sysvals.cgtest = getArgInt('cgtest', value, 0, 1, False) + elif(option == 'cgphase'): + d = Data(0) + if value not in d.phasedef: + doError('invalid phase --> (%s: %s), valid phases are %s'\ + % (option, value, d.phasedef.keys()), True) + sysvals.cgphase = value + elif(option == 'fadd'): + file = sysvals.configFile(value) + if(not file): + doError('%s does not exist' % value) + sysvals.addFtraceFilterFunctions(file) + elif(option == 'result'): + sysvals.result = value + elif(option == 'multi'): + nums = value.split() + if len(nums) != 2: + doError('multi requires 2 integers (exec_count and delay)', True) + sysvals.multiinit(nums[0], nums[1]) + elif(option == 'devicefilter'): + sysvals.setDeviceFilter(value) + elif(option == 'expandcg'): + sysvals.cgexp = checkArgBool(option, value) + elif(option == 'srgap'): + if checkArgBool(option, value): + sysvals.srgap = 5 + elif(option == 'mode'): + sysvals.suspendmode = value + elif(option == 'command' or option == 'cmd'): + sysvals.testcommand = value + elif(option == 'x2delay'): + sysvals.x2delay = getArgInt('x2delay', value, 0, 60000, False) + elif(option == 'predelay'): + sysvals.predelay = getArgInt('predelay', value, 0, 60000, False) + elif(option == 'postdelay'): + sysvals.postdelay = getArgInt('postdelay', value, 0, 60000, False) + elif(option == 'maxdepth'): + sysvals.max_graph_depth = getArgInt('maxdepth', value, 0, 1000, False) 
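+ # For reference, a minimal config file consumed by the [Settings] loop
+ # above and below might look like this (values are hypothetical; the
+ # kernel tree also ships examples under tools/power/pm-graph/config/):
+ #   [Settings]
+ #   verbose: true
+ #   mode: freeze
+ #   rtcwake: 15
+ #   output-dir: suspend-{date}-{time}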
+ elif(option == 'rtcwake'): + if value in switchoff: + sysvals.rtcwake = False + else: + sysvals.rtcwake = True + sysvals.rtcwaketime = getArgInt('rtcwake', value, 0, 3600, False) + elif(option == 'timeprec'): + sysvals.setPrecision(getArgInt('timeprec', value, 0, 6, False)) + elif(option == 'mindev'): + sysvals.mindevlen = getArgFloat('mindev', value, 0.0, 10000.0, False) + elif(option == 'callloop-maxgap'): + sysvals.callloopmaxgap = getArgFloat('callloop-maxgap', value, 0.0, 1.0, False) + elif(option == 'callloop-maxlen'): + sysvals.callloopmaxlen = getArgFloat('callloop-maxlen', value, 0.0, 1.0, False) + elif(option == 'mincg'): + sysvals.mincglen = getArgFloat('mincg', value, 0.0, 10000.0, False) + elif(option == 'bufsize'): + sysvals.bufsize = getArgInt('bufsize', value, 1, 1024*1024*8, False) + elif(option == 'output-dir'): + sysvals.outdir = sysvals.setOutputFolder(value) + + if sysvals.suspendmode == 'command' and not sysvals.testcommand: + doError('No command supplied for mode "command"') + + # compatibility errors + if sysvals.usedevsrc and sysvals.usecallgraph: + doError('-dev is not compatible with -f') + if sysvals.usecallgraph and sysvals.useprocmon: + doError('-proc is not compatible with -f') + + if overridekprobes: + sysvals.tracefuncs = dict() + if overridedevkprobes: + sysvals.dev_tracefuncs = dict() + + kprobes = dict() + kprobesec = 'dev_timeline_functions_'+platform.machine() + if kprobesec in sections: + for name in Config.options(kprobesec): + text = Config.get(kprobesec, name) + kprobes[name] = (text, True) + kprobesec = 'timeline_functions_'+platform.machine() + if kprobesec in sections: + for name in Config.options(kprobesec): + if name in kprobes: + doError('Duplicate timeline function found "%s"' % (name)) + text = Config.get(kprobesec, name) + kprobes[name] = (text, False) + + for name in kprobes: + function = name + format = name + color = '' + args = dict() + text, dev = kprobes[name] + data = text.split() + i = 0 + for val in data: + # bracketed strings are special formatting, read them separately + if val[0] == '[' and val[-1] == ']': + for prop in val[1:-1].split(','): + p = prop.split('=') + if p[0] == 'color': + try: + color = int(p[1], 16) + color = '#'+p[1] + except: + color = p[1] + continue + # first real arg should be the format string + if i == 0: + format = val + # all other args are actual function args + else: + d = val.split('=') + args[d[0]] = d[1] + i += 1 + if not function or not format: + doError('Invalid kprobe: %s' % name) + for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', format): + if arg not in args: + doError('Kprobe "%s" is missing argument "%s"' % (name, arg)) + if (dev and name in sysvals.dev_tracefuncs) or (not dev and name in sysvals.tracefuncs): + doError('Duplicate timeline function found "%s"' % (name)) + + kp = { + 'name': name, + 'func': function, + 'format': format, + sysvals.archargs: args + } + if color: + kp['color'] = color + if dev: + sysvals.dev_tracefuncs[name] = kp + else: + sysvals.tracefuncs[name] = kp + +# Function: printHelp +# Description: +# print out the help text +def printHelp(): + pprint('\n%s v%s\n'\ + 'Usage: sudo sleepgraph <options> <command>\n'\ + '\n'\ + 'Description:\n'\ + ' This tool is designed to assist kernel and OS developers in optimizing\n'\ + ' their linux stack\'s suspend/resume time. Using a kernel image built\n'\ + ' with a few extra options enabled, the tool will execute a suspend and\n'\ + ' capture dmesg and ftrace data until resume is complete.
This data is\n'\ + ' transformed into a device timeline and an optional callgraph to give\n'\ + ' a detailed view of which devices/subsystems are taking the most\n'\ + ' time in suspend/resume.\n'\ + '\n'\ + ' If no specific command is given, the default behavior is to initiate\n'\ + ' a suspend/resume and capture the dmesg/ftrace output as an html timeline.\n'\ + '\n'\ + ' Generates output files in subdirectory: suspend-yymmdd-HHMMSS\n'\ + ' HTML output: <hostname>_<mode>.html\n'\ + ' raw dmesg output: <hostname>_<mode>_dmesg.txt\n'\ + ' raw ftrace output: <hostname>_<mode>_ftrace.txt\n'\ + '\n'\ + 'Options:\n'\ + ' -h Print this help text\n'\ + ' -v Print the current tool version\n'\ + ' -config fn Pull arguments and config options from file fn\n'\ + ' -verbose Print extra information during execution and analysis\n'\ + ' -m mode Mode to initiate for suspend (default: %s)\n'\ + ' -o name Overrides the output subdirectory name when running a new test\n'\ + ' default: suspend-{date}-{time}\n'\ + ' -rtcwake t Wakeup t seconds after suspend, set t to "off" to disable (default: 15)\n'\ + ' -addlogs Add the dmesg and ftrace logs to the html output\n'\ + ' -noturbostat Don\'t use turbostat in freeze mode (default: disabled)\n'\ + ' -srgap Add a visible gap in the timeline between sus/res (default: disabled)\n'\ + ' -skiphtml Run the test and capture the trace logs, but skip the timeline (default: disabled)\n'\ + ' -result fn Export a results table to a text file for parsing.\n'\ + ' -wifi If a wifi connection is available, check that it reconnects after resume.\n'\ + ' [testprep]\n'\ + ' -sync Sync the filesystems before starting the test\n'\ + ' -rs on/off Enable/disable runtime suspend for all devices, restore all after test\n'\ + ' -display m Change the display mode to m for the test (on/off/standby/suspend)\n'\ + ' [advanced]\n'\ + ' -gzip Gzip the trace and dmesg logs to save space\n'\ + ' -cmd {s} Run the timeline over a custom command, e.g. "sync -d"\n'\ + ' -proc Add usermode process info into the timeline (default: disabled)\n'\ + ' -dev Add kernel function calls and threads to the timeline (default: disabled)\n'\ + ' -x2 Run two suspend/resumes back to back (default: disabled)\n'\ + ' -x2delay t Include t ms delay between multiple test runs (default: 0 ms)\n'\ + ' -predelay t Include t ms delay before 1st suspend (default: 0 ms)\n'\ + ' -postdelay t Include t ms delay after last resume (default: 0 ms)\n'\ + ' -mindev ms Discard all device blocks shorter than ms milliseconds (e.g. 0.001 for us)\n'\ + ' -multi n d Execute <n> consecutive tests at <d> seconds intervals. If <n> is followed\n'\ + ' by a "d", "h", or "m" execute for <n> days, hours, or mins instead.\n'\ + ' The outputs will be created in a new subdirectory with a summary page.\n'\ + ' -maxfail n Abort a -multi run after n consecutive fails (default is 0 = never abort)\n'\ + ' [debug]\n'\ + ' -f Use ftrace to create device callgraphs (default: disabled)\n'\ + ' -ftop Use ftrace on the top level call: "%s" (default: disabled)\n'\ + ' -maxdepth N limit the callgraph data to N call levels (default: 0=all)\n'\ + ' -expandcg pre-expand the callgraph data in the html output (default: disabled)\n'\ + ' -fadd file Add functions to be graphed in the timeline from a list in a text file\n'\ + ' -filter "d1,d2,..." Filter out all but this comma-delimited list of device names\n'\ + ' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)\n'\ + ' -cgphase P Only show callgraph data for phase P (e.g. suspend_late)\n'\ + ' -cgtest N Only show callgraph data for test N (e.g.
0 or 1 in an x2 run)\n'\ + ' -timeprec N Number of significant digits in timestamps (0:S, [3:ms], 6:us)\n'\ + ' -cgfilter S Filter the callgraph output in the timeline\n'\ + ' -cgskip file Callgraph functions to skip, off to disable (default: cgskip.txt)\n'\ + ' -bufsize N Set trace buffer size to N kilo-bytes (default: all of free memory)\n'\ + ' -devdump Print out all the raw device data for each phase\n'\ + ' -cgdump Print out all the raw callgraph data\n'\ + '\n'\ + 'Other commands:\n'\ + ' -modes List available suspend modes\n'\ + ' -status Test to see if the system is enabled to run this tool\n'\ + ' -fpdt Print out the contents of the ACPI Firmware Performance Data Table\n'\ + ' -wificheck Print out wifi connection info\n'\ + ' -x Test xset by toggling the given mode (on/off/standby/suspend)\n'\ + ' -sysinfo Print out system info extracted from BIOS\n'\ + ' -devinfo Print out the pm settings of all devices which support runtime suspend\n'\ + ' -cmdinfo Print out all the platform info collected before and after suspend/resume\n'\ + ' -flist Print the list of functions currently being captured in ftrace\n'\ + ' -flistall Print all functions capable of being captured in ftrace\n'\ + ' -summary dir Create a summary of tests in this dir [-genhtml builds missing html]\n'\ + ' [redo]\n'\ + ' -ftrace ftracefile Create HTML output using ftrace input (used with -dmesg)\n'\ + ' -dmesg dmesgfile Create HTML output using dmesg (used with -ftrace)\n'\ + '' % (sysvals.title, sysvals.version, sysvals.suspendmode, sysvals.ftopfunc)) + return True + +# ----------------- MAIN -------------------- +# exec start (skipped if script is loaded as library) +if __name__ == '__main__': + genhtml = False + cmd = '' + simplecmds = ['-sysinfo', '-modes', '-fpdt', '-flist', '-flistall', + '-devinfo', '-status', '-xon', '-xoff', '-xstandby', '-xsuspend', + '-xinit', '-xreset', '-xstat', '-wificheck', '-cmdinfo'] + if '-f' in sys.argv: + sysvals.cgskip = sysvals.configFile('cgskip.txt') + # loop through the command line arguments + args = iter(sys.argv[1:]) + for arg in args: + if(arg == '-m'): + try: + val = next(args) + except: + doError('No mode supplied', True) + if val == 'command' and not sysvals.testcommand: + doError('No command supplied for mode "command"', True) + sysvals.suspendmode = val + elif(arg in simplecmds): + cmd = arg[1:] + elif(arg == '-h'): + printHelp() + sys.exit(0) + elif(arg == '-v'): + pprint("Version %s" % sysvals.version) + sys.exit(0) + elif(arg == '-x2'): + sysvals.execcount = 2 + elif(arg == '-x2delay'): + sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000) + elif(arg == '-predelay'): + sysvals.predelay = getArgInt('-predelay', args, 0, 60000) + elif(arg == '-postdelay'): + sysvals.postdelay = getArgInt('-postdelay', args, 0, 60000) + elif(arg == '-f'): + sysvals.usecallgraph = True + elif(arg == '-ftop'): + sysvals.usecallgraph = True + sysvals.ftop = True + sysvals.usekprobes = False + elif(arg == '-skiphtml'): + sysvals.skiphtml = True + elif(arg == '-cgdump'): + sysvals.cgdump = True + elif(arg == '-devdump'): + sysvals.devdump = True + elif(arg == '-genhtml'): + genhtml = True + elif(arg == '-addlogs'): + sysvals.dmesglog = sysvals.ftracelog = True + elif(arg == '-nologs'): + sysvals.dmesglog = sysvals.ftracelog = False + elif(arg == '-addlogdmesg'): + sysvals.dmesglog = True + elif(arg == '-addlogftrace'): + sysvals.ftracelog = True + elif(arg == '-noturbostat'): + sysvals.tstat = False + elif(arg == '-verbose'): + sysvals.verbose = True + elif(arg == '-proc'): + 
sysvals.useprocmon = True + elif(arg == '-dev'): + sysvals.usedevsrc = True + elif(arg == '-sync'): + sysvals.sync = True + elif(arg == '-wifi'): + sysvals.wifi = True + elif(arg == '-gzip'): + sysvals.gzip = True + elif(arg == '-info'): + try: + val = next(args) + except: + doError('-info requires one string argument', True) + elif(arg == '-desc'): + try: + val = next(args) + except: + doError('-desc requires one string argument', True) + elif(arg == '-rs'): + try: + val = next(args) + except: + doError('-rs requires "enable" or "disable"', True) + if val.lower() in switchvalues: + if val.lower() in switchoff: + sysvals.rs = -1 + else: + sysvals.rs = 1 + else: + doError('invalid option: %s, use "enable/disable" or "on/off"' % val, True) + elif(arg == '-display'): + try: + val = next(args) + except: + doError('-display requires an mode value', True) + disopt = ['on', 'off', 'standby', 'suspend'] + if val.lower() not in disopt: + doError('valid display mode values are %s' % disopt, True) + sysvals.display = val.lower() + elif(arg == '-maxdepth'): + sysvals.max_graph_depth = getArgInt('-maxdepth', args, 0, 1000) + elif(arg == '-rtcwake'): + try: + val = next(args) + except: + doError('No rtcwake time supplied', True) + if val.lower() in switchoff: + sysvals.rtcwake = False + else: + sysvals.rtcwake = True + sysvals.rtcwaketime = getArgInt('-rtcwake', val, 0, 3600, False) + elif(arg == '-timeprec'): + sysvals.setPrecision(getArgInt('-timeprec', args, 0, 6)) + elif(arg == '-mindev'): + sysvals.mindevlen = getArgFloat('-mindev', args, 0.0, 10000.0) + elif(arg == '-mincg'): + sysvals.mincglen = getArgFloat('-mincg', args, 0.0, 10000.0) + elif(arg == '-bufsize'): + sysvals.bufsize = getArgInt('-bufsize', args, 1, 1024*1024*8) + elif(arg == '-cgtest'): + sysvals.cgtest = getArgInt('-cgtest', args, 0, 1) + elif(arg == '-cgphase'): + try: + val = next(args) + except: + doError('No phase name supplied', True) + d = Data(0) + if val not in d.phasedef: + doError('invalid phase --> (%s: %s), valid phases are %s'\ + % (arg, val, d.phasedef.keys()), True) + sysvals.cgphase = val + elif(arg == '-cgfilter'): + try: + val = next(args) + except: + doError('No callgraph functions supplied', True) + sysvals.setCallgraphFilter(val) + elif(arg == '-skipkprobe'): + try: + val = next(args) + except: + doError('No kprobe functions supplied', True) + sysvals.skipKprobes(val) + elif(arg == '-cgskip'): + try: + val = next(args) + except: + doError('No file supplied', True) + if val.lower() in switchoff: + sysvals.cgskip = '' + else: + sysvals.cgskip = sysvals.configFile(val) + if(not sysvals.cgskip): + doError('%s does not exist' % sysvals.cgskip) + elif(arg == '-callloop-maxgap'): + sysvals.callloopmaxgap = getArgFloat('-callloop-maxgap', args, 0.0, 1.0) + elif(arg == '-callloop-maxlen'): + sysvals.callloopmaxlen = getArgFloat('-callloop-maxlen', args, 0.0, 1.0) + elif(arg == '-cmd'): + try: + val = next(args) + except: + doError('No command string supplied', True) + sysvals.testcommand = val + sysvals.suspendmode = 'command' + elif(arg == '-expandcg'): + sysvals.cgexp = True + elif(arg == '-srgap'): + sysvals.srgap = 5 + elif(arg == '-maxfail'): + sysvals.maxfail = getArgInt('-maxfail', args, 0, 1000000) + elif(arg == '-multi'): + try: + c, d = next(args), next(args) + except: + doError('-multi requires two values', True) + sysvals.multiinit(c, d) + elif(arg == '-o'): + try: + val = next(args) + except: + doError('No subdirectory name supplied', True) + sysvals.outdir = sysvals.setOutputFolder(val) + elif(arg == 
'-config'): + try: + val = next(args) + except: + doError('No text file supplied', True) + file = sysvals.configFile(val) + if(not file): + doError('%s does not exist' % val) + configFromFile(file) + elif(arg == '-fadd'): + try: + val = next(args) + except: + doError('No text file supplied', True) + file = sysvals.configFile(val) + if(not file): + doError('%s does not exist' % val) + sysvals.addFtraceFilterFunctions(file) + elif(arg == '-dmesg'): + try: + val = next(args) + except: + doError('No dmesg file supplied', True) + sysvals.notestrun = True + sysvals.dmesgfile = val + if(os.path.exists(sysvals.dmesgfile) == False): + doError('%s does not exist' % sysvals.dmesgfile) + elif(arg == '-ftrace'): + try: + val = next(args) + except: + doError('No ftrace file supplied', True) + sysvals.notestrun = True + sysvals.ftracefile = val + if(os.path.exists(sysvals.ftracefile) == False): + doError('%s does not exist' % sysvals.ftracefile) + elif(arg == '-summary'): + try: + val = next(args) + except: + doError('No directory supplied', True) + cmd = 'summary' + sysvals.outdir = val + sysvals.notestrun = True + if(os.path.isdir(val) == False): + doError('%s is not accessible' % val) + elif(arg == '-filter'): + try: + val = next(args) + except: + doError('No devnames supplied', True) + sysvals.setDeviceFilter(val) + elif(arg == '-result'): + try: + val = next(args) + except: + doError('No result file supplied', True) + sysvals.result = val + sysvals.signalHandlerInit() + else: + doError('Invalid argument: '+arg, True) + + # compatibility errors + if(sysvals.usecallgraph and sysvals.usedevsrc): + doError('-dev is not compatible with -f') + if(sysvals.usecallgraph and sysvals.useprocmon): + doError('-proc is not compatible with -f') + + if sysvals.usecallgraph and sysvals.cgskip: + sysvals.vprint('Using cgskip file: %s' % sysvals.cgskip) + sysvals.setCallgraphBlacklist(sysvals.cgskip) + + # callgraph size cannot exceed device size + if sysvals.mincglen < sysvals.mindevlen: + sysvals.mincglen = sysvals.mindevlen + + # remove existing buffers before calculating memory + if(sysvals.usecallgraph or sysvals.usedevsrc): + sysvals.fsetVal('16', 'buffer_size_kb') + sysvals.cpuInfo() + + # just run a utility command and exit + if(cmd != ''): + ret = 0 + if(cmd == 'status'): + if not statusCheck(True): + ret = 1 + elif(cmd == 'fpdt'): + if not getFPDT(True): + ret = 1 + elif(cmd == 'sysinfo'): + sysvals.printSystemInfo(True) + elif(cmd == 'devinfo'): + deviceInfo() + elif(cmd == 'modes'): + pprint(getModes()) + elif(cmd == 'flist'): + sysvals.getFtraceFilterFunctions(True) + elif(cmd == 'flistall'): + sysvals.getFtraceFilterFunctions(False) + elif(cmd == 'summary'): + runSummary(sysvals.outdir, True, genhtml) + elif(cmd in ['xon', 'xoff', 'xstandby', 'xsuspend', 'xinit', 'xreset']): + sysvals.verbose = True + ret = sysvals.displayControl(cmd[1:]) + elif(cmd == 'xstat'): + pprint('Display Status: %s' % sysvals.displayControl('stat').upper()) + elif(cmd == 'wificheck'): + dev = sysvals.checkWifi() + if dev: + print('%s is connected' % sysvals.wifiDetails(dev)) + else: + print('No wifi connection found') + elif(cmd == 'cmdinfo'): + for out in sysvals.cmdinfo(False, True): + print('[%s - %s]\n%s\n' % out) + sys.exit(ret) + + # if instructed, re-analyze existing data files + if(sysvals.notestrun): + stamp = rerunTest(sysvals.outdir) + sysvals.outputResult(stamp) + sys.exit(0) + + # verify that we can run a test + error = statusCheck() + if(error): + doError(error) + + # extract mem/disk extra modes and convert + mode 
= sysvals.suspendmode + if mode.startswith('mem'): + memmode = mode.split('-', 1)[-1] if '-' in mode else 'deep' + if memmode == 'shallow': + mode = 'standby' + elif memmode == 's2idle': + mode = 'freeze' + else: + mode = 'mem' + sysvals.memmode = memmode + sysvals.suspendmode = mode + if mode.startswith('disk-'): + sysvals.diskmode = mode.split('-', 1)[-1] + sysvals.suspendmode = 'disk' + sysvals.systemInfo(dmidecode(sysvals.mempath)) + + failcnt, ret = 0, 0 + if sysvals.multitest['run']: + # run multiple tests in a separate subdirectory + if not sysvals.outdir: + if 'time' in sysvals.multitest: + s = '-%dm' % sysvals.multitest['time'] + else: + s = '-x%d' % sysvals.multitest['count'] + sysvals.outdir = datetime.now().strftime('suspend-%y%m%d-%H%M%S'+s) + if not os.path.isdir(sysvals.outdir): + os.makedirs(sysvals.outdir) + sysvals.sudoUserchown(sysvals.outdir) + finish = datetime.now() + if 'time' in sysvals.multitest: + finish += timedelta(minutes=sysvals.multitest['time']) + for i in range(sysvals.multitest['count']): + sysvals.multistat(True, i, finish) + if i != 0 and sysvals.multitest['delay'] > 0: + pprint('Waiting %d seconds...' % (sysvals.multitest['delay'])) + time.sleep(sysvals.multitest['delay']) + fmt = 'suspend-%y%m%d-%H%M%S' + sysvals.testdir = os.path.join(sysvals.outdir, datetime.now().strftime(fmt)) + ret = runTest(i+1, True) + failcnt = 0 if not ret else failcnt + 1 + if sysvals.maxfail > 0 and failcnt >= sysvals.maxfail: + pprint('Maximum fail count of %d reached, aborting multitest' % (sysvals.maxfail)) + break + time.sleep(5) + sysvals.resetlog() + sysvals.multistat(False, i, finish) + if 'time' in sysvals.multitest and datetime.now() >= finish: + break + if not sysvals.skiphtml: + runSummary(sysvals.outdir, False, False) + sysvals.sudoUserchown(sysvals.outdir) + else: + if sysvals.outdir: + sysvals.testdir = sysvals.outdir + # run the test in the current directory + ret = runTest() + + # reset to default values after testing + if sysvals.display: + sysvals.displayControl('reset') + if sysvals.rs != 0: + sysvals.setRuntimeSuspend(False) + sys.exit(ret) diff --git a/data/linux-small/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/data/linux-small/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py new file mode 100755 index 0000000..e15e206 --- /dev/null +++ b/data/linux-small/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py @@ -0,0 +1,615 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: GPL-2.0-only +# -*- coding: utf-8 -*- +# +""" This utility can be used to debug and tune the performance of the +intel_pstate driver. This utility can be used in two ways: +- If there is Linux trace file with pstate_sample events enabled, then +this utility can parse the trace file and generate performance plots. +- If user has not specified a trace file as input via command line parameters, +then this utility enables and collects trace data for a user specified interval +and generates performance plots. + +Prerequisites: + Python version 2.7.x or higher + gnuplot 5.0 or higher + gnuplot-py 1.8 or higher + (Most of the distributions have these required packages. They may be called + gnuplot-py, phython-gnuplot or phython3-gnuplot, gnuplot-nox, ... 
) + + HWP (Hardware P-States are disabled) + Kernel config for Linux trace is enabled + + see print_help(): for Usage and Output details + + """ +from __future__ import print_function +from datetime import datetime +import subprocess +import os +import time +import re +import signal +import sys +import getopt +import Gnuplot +from numpy import * +from decimal import * + +__author__ = "Srinivas Pandruvada" +__copyright__ = " Copyright (c) 2017, Intel Corporation. " +__license__ = "GPL version 2" + + +MAX_CPUS = 256 + +# Define the csv file columns +C_COMM = 18 +C_GHZ = 17 +C_ELAPSED = 16 +C_SAMPLE = 15 +C_DURATION = 14 +C_LOAD = 13 +C_BOOST = 12 +C_FREQ = 11 +C_TSC = 10 +C_APERF = 9 +C_MPERF = 8 +C_TO = 7 +C_FROM = 6 +C_SCALED = 5 +C_CORE = 4 +C_USEC = 3 +C_SEC = 2 +C_CPU = 1 + +global sample_num, last_sec_cpu, last_usec_cpu, start_time, testname + +# 11 digits covers uptime to 115 days +getcontext().prec = 11 + +sample_num = 0 +last_sec_cpu = [0] * MAX_CPUS +last_usec_cpu = [0] * MAX_CPUS + +def print_help(): + print('intel_pstate_tracer.py:') + print(' Usage:') + print(' If the trace file is available, then to simply parse and plot, use (sudo not required):') + print(' ./intel_pstate_tracer.py [-c cpus] -t <trace_file> -n <test_name>') + print(' Or') + print(' ./intel_pstate_tracer.py [--cpu cpus] --trace_file <trace_file> --name <test_name>') + print(' To generate trace file, parse and plot, use (sudo required):') + print(' sudo ./intel_pstate_tracer.py [-c cpus] -i <interval> -n <test_name> -m <kbytes>') + print(' Or') + print(' sudo ./intel_pstate_tracer.py [--cpu cpus] --interval <interval> --name <test_name> --memory <kbytes>') + print(' Optional argument:') + print(' cpus: comma separated list of CPUs') + print(' kbytes: Kilo bytes of memory per CPU to allocate to the trace buffer. Default: 10240') + print(' Output:') + print(' If not already present, creates a "results/test_name" folder in the current working directory with:') + print(' cpu.csv - comma separated values file with trace contents and some additional calculations.') + print(' cpu???.csv - comma separated values file for CPU number ???.') + print(' *.png - a variety of PNG format plot files created from the trace contents and the additional calculations.') + print(' Notes:') + print(' Avoid the use of _ (underscore) in test names, because in gnuplot it is a subscript directive.') + print(' Maximum number of CPUs is {0:d}. If there are more the script will abort with an error.'.format(MAX_CPUS)) + print(' Off-line CPUs cause the script to list some warnings, and create some empty files. Use the CPU mask feature for a clean run.') + print(' Empty y range warnings for autoscaled plots can occur and can be ignored.') + +def plot_perf_busy_with_sample(cpu_index): + """ Plot method to per cpu information """ + + file_name = 'cpu{:0>3}.csv'.format(cpu_index) + if os.path.exists(file_name): + output_png = "cpu%03d_perf_busy_vs_samples.png" % cpu_index + g_plot = common_all_gnuplot_settings(output_png) +# autoscale this one, no set y1 range + g_plot('set y2range [0:200]') + g_plot('set y2tics 0, 10') + g_plot('set title "{} : cpu perf busy vs.
sample : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now())) +# Override common + g_plot('set xlabel "Samples"') + g_plot('set ylabel "P-State"') + g_plot('set y2label "Scaled Busy/performance/io-busy(%)"') + set_4_plot_linestyles(g_plot) + g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y2 title "performance",\\'.format(C_SAMPLE, C_CORE)) + g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 2 axis x1y2 title "scaled-busy",\\'.format(C_SAMPLE, C_SCALED)) + g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 3 axis x1y2 title "io-boost",\\'.format(C_SAMPLE, C_BOOST)) + g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 4 axis x1y1 title "P-State"'.format(C_SAMPLE, C_TO)) + +def plot_perf_busy(cpu_index): + """ Plot some per cpu information """ + + file_name = 'cpu{:0>3}.csv'.format(cpu_index) + if os.path.exists(file_name): + output_png = "cpu%03d_perf_busy.png" % cpu_index + g_plot = common_all_gnuplot_settings(output_png) +# autoscale this one, no set y1 range + g_plot('set y2range [0:200]') + g_plot('set y2tics 0, 10') + g_plot('set title "{} : perf busy : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now())) + g_plot('set ylabel "P-State"') + g_plot('set y2label "Scaled Busy/performance/io-busy(%)"') + set_4_plot_linestyles(g_plot) + g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y2 title "performance",\\'.format(C_ELAPSED, C_CORE)) + g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 2 axis x1y2 title "scaled-busy",\\'.format(C_ELAPSED, C_SCALED)) + g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 3 axis x1y2 title "io-boost",\\'.format(C_ELAPSED, C_BOOST)) + g_plot('"' + file_name + '" using {:d}:{:d} with linespoints linestyle 4 axis x1y1 title "P-State"'.format(C_ELAPSED, C_TO)) + +def plot_durations(cpu_index): + """ Plot per cpu durations """ + + file_name = 'cpu{:0>3}.csv'.format(cpu_index) + if os.path.exists(file_name): + output_png = "cpu%03d_durations.png" % cpu_index + g_plot = common_all_gnuplot_settings(output_png) +# autoscale this one, no set y range + g_plot('set title "{} : durations : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now())) + g_plot('set ylabel "Timer Duration (MilliSeconds)"') +# override common + g_plot('set key off') + set_4_plot_linestyles(g_plot) + g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_DURATION)) + +def plot_loads(cpu_index): + """ Plot per cpu loads """ + + file_name = 'cpu{:0>3}.csv'.format(cpu_index) + if os.path.exists(file_name): + output_png = "cpu%03d_loads.png" % cpu_index + g_plot = common_all_gnuplot_settings(output_png) + g_plot('set yrange [0:100]') + g_plot('set ytics 0, 10') + g_plot('set title "{} : loads : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now())) + g_plot('set ylabel "CPU load (percent)"') +# override common + g_plot('set key off') + set_4_plot_linestyles(g_plot) + g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_LOAD)) + +def plot_pstate_cpu_with_sample(): + """ Plot all cpu information """ + + if os.path.exists('cpu.csv'): + output_png = 'all_cpu_pstates_vs_samples.png' + g_plot = common_all_gnuplot_settings(output_png) +# autoscale this one, no set y range +# override common + g_plot('set xlabel "Samples"') + g_plot('set ylabel "P-State"') + 
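+# (Note: the all-CPU plot functions below gather their curves by shell-globbing
+# the per-CPU cpu???.csv files and feeding the resulting name list to gnuplot's
+# "plot for [i in title_list]" word iteration, one curve per file.)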
g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now())) + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO) + g_plot('title_list = "{}"'.format(title_list)) + g_plot(plot_str) + +def plot_pstate_cpu(): + """ Plot all cpu information from csv files """ + + output_png = 'all_cpu_pstates.png' + g_plot = common_all_gnuplot_settings(output_png) +# autoscale this one, no set y range + g_plot('set ylabel "P-State"') + g_plot('set title "{} : cpu pstates : {:%F %H:%M}"'.format(testname, datetime.now())) + +# the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file. +# plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s' +# + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO) + g_plot('title_list = "{}"'.format(title_list)) + g_plot(plot_str) + +def plot_load_cpu(): + """ Plot all cpu loads """ + + output_png = 'all_cpu_loads.png' + g_plot = common_all_gnuplot_settings(output_png) + g_plot('set yrange [0:100]') + g_plot('set ylabel "CPU load (percent)"') + g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now())) + + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD) + g_plot('title_list = "{}"'.format(title_list)) + g_plot(plot_str) + +def plot_frequency_cpu(): + """ Plot all cpu frequencies """ + + output_png = 'all_cpu_frequencies.png' + g_plot = common_all_gnuplot_settings(output_png) +# autoscale this one, no set y range + g_plot('set ylabel "CPU Frequency (GHz)"') + g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now())) + + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ) + g_plot('title_list = "{}"'.format(title_list)) + g_plot(plot_str) + +def plot_duration_cpu(): + """ Plot all cpu durations """ + + output_png = 'all_cpu_durations.png' + g_plot = common_all_gnuplot_settings(output_png) +# autoscale this one, no set y range + g_plot('set ylabel "Timer Duration (MilliSeconds)"') + g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now())) + + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION) + g_plot('title_list = "{}"'.format(title_list)) + g_plot(plot_str) + +def plot_scaled_cpu(): + """ Plot all cpu scaled busy """ + + output_png = 'all_cpu_scaled.png' + g_plot = common_all_gnuplot_settings(output_png) +# autoscale this one, no set y range + g_plot('set ylabel "Scaled Busy (Unitless)"') + g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now())) + + title_list = subprocess.check_output('ls 
cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED) + g_plot('title_list = "{}"'.format(title_list)) + g_plot(plot_str) + +def plot_boost_cpu(): + """ Plot all cpu IO Boosts """ + + output_png = 'all_cpu_boost.png' + g_plot = common_all_gnuplot_settings(output_png) + g_plot('set yrange [0:100]') + g_plot('set ylabel "CPU IO Boost (percent)"') + g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now())) + + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST) + g_plot('title_list = "{}"'.format(title_list)) + g_plot(plot_str) + +def plot_ghz_cpu(): + """ Plot all cpu tsc ghz """ + + output_png = 'all_cpu_ghz.png' + g_plot = common_all_gnuplot_settings(output_png) +# autoscale this one, no set y range + g_plot('set ylabel "TSC Frequency (GHz)"') + g_plot('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now())) + + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') + plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ) + g_plot('title_list = "{}"'.format(title_list)) + g_plot(plot_str) + +def common_all_gnuplot_settings(output_png): + """ common gnuplot settings for multiple CPUs one one graph. """ + + g_plot = common_gnuplot_settings() + g_plot('set output "' + output_png + '"') + return(g_plot) + +def common_gnuplot_settings(): + """ common gnuplot settings. """ + + g_plot = Gnuplot.Gnuplot(persist=1) +# The following line is for rigor only. It seems to be assumed for .csv files + g_plot('set datafile separator \",\"') + g_plot('set ytics nomirror') + g_plot('set xtics nomirror') + g_plot('set xtics font ", 10"') + g_plot('set ytics font ", 10"') + g_plot('set tics out scale 1.0') + g_plot('set grid') + g_plot('set key out horiz') + g_plot('set key bot center') + g_plot('set key samplen 2 spacing .8 font ", 9"') + g_plot('set term png size 1200, 600') + g_plot('set title font ", 11"') + g_plot('set ylabel font ", 10"') + g_plot('set xlabel font ", 10"') + g_plot('set xlabel offset 0, 0.5') + g_plot('set xlabel "Elapsed Time (Seconds)"') + return(g_plot) + +def set_4_plot_linestyles(g_plot): + """ set the linestyles used for 4 plots in 1 graphs. 
""" + + g_plot('set style line 1 linetype 1 linecolor rgb "green" pointtype -1') + g_plot('set style line 2 linetype 1 linecolor rgb "red" pointtype -1') + g_plot('set style line 3 linetype 1 linecolor rgb "purple" pointtype -1') + g_plot('set style line 4 linetype 1 linecolor rgb "blue" pointtype -1') + +def store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz): + """ Store master csv file information """ + + global graph_data_present + + if cpu_mask[cpu_int] == 0: + return + + try: + f_handle = open('cpu.csv', 'a') + string_buffer = "CPU_%03u, %05u, %06u, %u, %u, %u, %u, %u, %u, %u, %.4f, %u, %.2f, %.3f, %u, %.3f, %.3f, %s\n" % (cpu_int, int(time_pre_dec), int(time_post_dec), int(core_busy), int(scaled), int(_from), int(_to), int(mperf), int(aperf), int(tsc), freq_ghz, int(io_boost), load, duration_ms, sample_num, elapsed_time, tsc_ghz, common_comm) + f_handle.write(string_buffer); + f_handle.close() + except: + print('IO error cpu.csv') + return + + graph_data_present = True; + +def split_csv(): + """ seperate the all csv file into per CPU csv files. """ + + global current_max_cpu + + if os.path.exists('cpu.csv'): + for index in range(0, current_max_cpu + 1): + if cpu_mask[int(index)] != 0: + os.system('grep -m 1 common_cpu cpu.csv > cpu{:0>3}.csv'.format(index)) + os.system('grep CPU_{:0>3} cpu.csv >> cpu{:0>3}.csv'.format(index, index)) + +def fix_ownership(path): + """Change the owner of the file to SUDO_UID, if required""" + + uid = os.environ.get('SUDO_UID') + gid = os.environ.get('SUDO_GID') + if uid is not None: + os.chown(path, int(uid), int(gid)) + +def cleanup_data_files(): + """ clean up existing data files """ + + if os.path.exists('cpu.csv'): + os.remove('cpu.csv') + f_handle = open('cpu.csv', 'a') + f_handle.write('common_cpu, common_secs, common_usecs, core_busy, scaled_busy, from, to, mperf, aperf, tsc, freq, boost, load, duration_ms, sample_num, elapsed_time, tsc_ghz, common_comm') + f_handle.write('\n') + f_handle.close() + +def clear_trace_file(): + """ Clear trace file """ + + try: + f_handle = open('/sys/kernel/debug/tracing/trace', 'w') + f_handle.close() + except: + print('IO error clearing trace file ') + sys.exit(2) + +def enable_trace(): + """ Enable trace """ + + try: + open('/sys/kernel/debug/tracing/events/power/pstate_sample/enable' + , 'w').write("1") + except: + print('IO error enabling trace ') + sys.exit(2) + +def disable_trace(): + """ Disable trace """ + + try: + open('/sys/kernel/debug/tracing/events/power/pstate_sample/enable' + , 'w').write("0") + except: + print('IO error disabling trace ') + sys.exit(2) + +def set_trace_buffer_size(): + """ Set trace buffer size """ + + try: + with open('/sys/kernel/debug/tracing/buffer_size_kb', 'w') as fp: + fp.write(memory) + except: + print('IO error setting trace buffer size ') + sys.exit(2) + +def free_trace_buffer(): + """ Free the trace buffer memory """ + + try: + open('/sys/kernel/debug/tracing/buffer_size_kb' + , 'w').write("1") + except: + print('IO error freeing trace buffer ') + sys.exit(2) + +def read_trace_data(filename): + """ Read and parse trace data """ + + global current_max_cpu + global sample_num, last_sec_cpu, last_usec_cpu, start_time + + try: + data = open(filename, 'r').read() + except: + print('Error opening ', filename) + sys.exit(2) + + for line in data.splitlines(): + search_obj = \ + 
re.search(r'(^(.*?)\[)((\d+)[^\]])(.*?)(\d+)([.])(\d+)(.*?core_busy=)(\d+)(.*?scaled=)(\d+)(.*?from=)(\d+)(.*?to=)(\d+)(.*?mperf=)(\d+)(.*?aperf=)(\d+)(.*?tsc=)(\d+)(.*?freq=)(\d+)' + , line) + + if search_obj: + cpu = search_obj.group(3) + cpu_int = int(cpu) + cpu = str(cpu_int) + + time_pre_dec = search_obj.group(6) + time_post_dec = search_obj.group(8) + core_busy = search_obj.group(10) + scaled = search_obj.group(12) + _from = search_obj.group(14) + _to = search_obj.group(16) + mperf = search_obj.group(18) + aperf = search_obj.group(20) + tsc = search_obj.group(22) + freq = search_obj.group(24) + common_comm = search_obj.group(2).replace(' ', '') + + # Not all kernel versions have io_boost field + io_boost = '0' + search_obj = re.search(r'.*?io_boost=(\d+)', line) + if search_obj: + io_boost = search_obj.group(1) + + if sample_num == 0 : + start_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000) + sample_num += 1 + + if last_sec_cpu[cpu_int] == 0 : + last_sec_cpu[cpu_int] = time_pre_dec + last_usec_cpu[cpu_int] = time_post_dec + else : + duration_us = (int(time_pre_dec) - int(last_sec_cpu[cpu_int])) * 1000000 + (int(time_post_dec) - int(last_usec_cpu[cpu_int])) + duration_ms = Decimal(duration_us) / Decimal(1000) + last_sec_cpu[cpu_int] = time_pre_dec + last_usec_cpu[cpu_int] = time_post_dec + elapsed_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000) - start_time + load = Decimal(int(mperf)*100)/ Decimal(tsc) + freq_ghz = Decimal(freq)/Decimal(1000000) +# Sanity check calculation, typically anomalies indicate missed samples +# However, check for 0 (should never occur) + tsc_ghz = Decimal(0) + if duration_ms != Decimal(0) : + tsc_ghz = Decimal(tsc)/duration_ms/Decimal(1000000) + store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz) + + if cpu_int > current_max_cpu: + current_max_cpu = cpu_int +# End of for each trace line loop +# Now separate the main overall csv file into per CPU csv files. + split_csv() + +def signal_handler(signal, frame): + print(' SIGINT: Forcing cleanup before exit.') + if interval: + disable_trace() + clear_trace_file() + # Free the memory + free_trace_buffer() + sys.exit(0) + +signal.signal(signal.SIGINT, signal_handler) + +interval = "" +filename = "" +cpu_list = "" +testname = "" +memory = "10240" +graph_data_present = False; + +valid1 = False +valid2 = False + +cpu_mask = zeros((MAX_CPUS,), dtype=int) + +try: + opts, args = getopt.getopt(sys.argv[1:],"ht:i:c:n:m:",["help","trace_file=","interval=","cpu=","name=","memory="]) +except getopt.GetoptError: + print_help() + sys.exit(2) +for opt, arg in opts: + if opt == '-h': + print_help() + sys.exit() + elif opt in ("-t", "--trace_file"): + valid1 = True + location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) + filename = os.path.join(location, arg) + elif opt in ("-i", "--interval"): + valid1 = True + interval = arg + elif opt in ("-c", "--cpu"): + cpu_list = arg + elif opt in ("-n", "--name"): + valid2 = True + testname = arg + elif opt in ("-m", "--memory"): + memory = arg + +if not (valid1 and valid2): + print_help() + sys.exit() + +if cpu_list: + for p in re.split("[,]", cpu_list): + if int(p) < MAX_CPUS : + cpu_mask[int(p)] = 1 +else: + for i in range (0, MAX_CPUS): + cpu_mask[i] = 1 + +if not os.path.exists('results'): + os.mkdir('results') + # The regular user needs to own the directory, not root.
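+ # (fix_ownership() chowns the path back to the invoking user via the
+ # SUDO_UID/SUDO_GID environment variables that sudo exports, see above.)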
+ fix_ownership('results') + +os.chdir('results') +if os.path.exists(testname): + print('The test name directory already exists. Please provide a unique test name. Test re-run not supported, yet.') + sys.exit() +os.mkdir(testname) +# The regular user needs to own the directory, not root. +fix_ownership(testname) +os.chdir(testname) + +# Temporary (or perhaps not) +cur_version = sys.version_info +print('python version (should be >= 2.7):') +print(cur_version) + +# Left as "cleanup" for potential future re-run ability. +cleanup_data_files() + +if interval: + filename = "/sys/kernel/debug/tracing/trace" + clear_trace_file() + set_trace_buffer_size() + enable_trace() + print('Sleeping for ', interval, 'seconds') + time.sleep(int(interval)) + disable_trace() + +current_max_cpu = 0 + +read_trace_data(filename) + +if interval: + clear_trace_file() + # Free the memory + free_trace_buffer() + +if graph_data_present == False: + print('No valid data to plot') + sys.exit(2) + +for cpu_no in range(0, current_max_cpu + 1): + plot_perf_busy_with_sample(cpu_no) + plot_perf_busy(cpu_no) + plot_durations(cpu_no) + plot_loads(cpu_no) + +plot_pstate_cpu_with_sample() +plot_pstate_cpu() +plot_load_cpu() +plot_frequency_cpu() +plot_duration_cpu() +plot_scaled_cpu() +plot_boost_cpu() +plot_ghz_cpu() + +# It is preferrable, but not necessary, that the regular user owns the files, not root. +for root, dirs, files in os.walk('.'): + for f in files: + fix_ownership(f) + +os.chdir('../../') diff --git a/data/linux-small/tools/rcu/rcu-cbs.py b/data/linux-small/tools/rcu/rcu-cbs.py new file mode 100644 index 0000000..f8b461b --- /dev/null +++ b/data/linux-small/tools/rcu/rcu-cbs.py @@ -0,0 +1,46 @@ +#!/usr/bin/env drgn +# SPDX-License-Identifier: GPL-2.0+ +# +# Dump out the number of RCU callbacks outstanding. +# +# On older kernels having multiple flavors of RCU, this dumps out the +# number of callbacks for the most heavily used flavor. +# +# Usage: sudo drgn rcu-cbs.py +# +# Copyright (C) 2021 Facebook, Inc. +# +# Authors: Paul E. McKenney + +import sys +import drgn +from drgn import NULL, Object +from drgn.helpers.linux import * + +def get_rdp0(prog): + try: + rdp0 = prog.variable('rcu_preempt_data', 'kernel/rcu/tree.c'); + except LookupError: + rdp0 = NULL; + + if rdp0 == NULL: + try: + rdp0 = prog.variable('rcu_sched_data', + 'kernel/rcu/tree.c'); + except LookupError: + rdp0 = NULL; + + if rdp0 == NULL: + rdp0 = prog.variable('rcu_data', 'kernel/rcu/tree.c'); + return rdp0.address_of_(); + +rdp0 = get_rdp0(prog); + +# Sum up RCU callbacks. +sum = 0; +for cpu in for_each_possible_cpu(prog): + rdp = per_cpu_ptr(rdp0, cpu); + len = rdp.cblist.len.value_(); + # print("CPU " + str(cpu) + " RCU callbacks: " + str(len)); + sum += len; +print("Number of RCU callbacks in flight: " + str(sum));
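
For reference, rcu-cbs.py above already carries the per-CPU print as a commented-out line; a minimal sketch of the same summation loop with that line enabled (run under drgn, reusing prog, rdp0 from get_rdp0(), and the same helpers) would be:

    # Per-CPU breakdown of outstanding RCU callbacks (sketch; same drgn
    # helpers and rdp0 as in rcu-cbs.py above).
    sum = 0;
    for cpu in for_each_possible_cpu(prog):
        rdp = per_cpu_ptr(rdp0, cpu);
        len = rdp.cblist.len.value_();
        print("CPU " + str(cpu) + " RCU callbacks: " + str(len));
        sum += len;
    print("Number of RCU callbacks in flight: " + str(sum));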